Split sync module into multiple submodules, other changes to sync (#2086)

Rua 2022-11-15 09:05:03 +01:00 committed by GitHub
parent 6de6050764
commit 9bc69940d0
30 changed files with 1062 additions and 709 deletions

View File

@ -52,8 +52,12 @@ mod linux {
SwapchainPresentInfo,
},
sync::{
now, ExternalSemaphoreHandleType, ExternalSemaphoreHandleTypes, FlushError, GpuFuture,
Semaphore, SemaphoreCreateInfo,
now,
semaphore::{
ExternalSemaphoreHandleType, ExternalSemaphoreHandleTypes, Semaphore,
SemaphoreCreateInfo,
},
FlushError, GpuFuture,
},
VulkanLibrary,
};

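After the split, semaphore types are imported from the new `sync::semaphore` submodule, while the most common items (`now`, `FlushError`, `GpuFuture`) remain re-exported at the root of `sync`. A minimal sketch of the updated import style used by this example:

use vulkano::sync::{
    now, FlushError, GpuFuture,
    // Semaphore types now live in their own submodule.
    semaphore::{Semaphore, SemaphoreCreateInfo},
};
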
View File

@ -42,7 +42,7 @@ use vulkano::{
},
render_pass::{Framebuffer, FramebufferCreateInfo, Subpass},
swapchain::{PresentMode, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo},
sync::{FenceSignalFuture, GpuFuture},
sync::{future::FenceSignalFuture, GpuFuture},
VulkanLibrary,
};
use vulkano_win::VkSurfaceBuild;

View File

@ -25,7 +25,7 @@ use crate::{
MemoryPropertyFlags, MemoryRequirements,
},
range_map::RangeMap,
sync::{AccessError, CurrentAccess, Sharing},
sync::{future::AccessError, CurrentAccess, Sharing},
DeviceSize, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
};
use parking_lot::{Mutex, MutexGuard};

View File

@ -27,7 +27,7 @@ use crate::{
image::{sys::Image, ImageAccess, ImageAspects, ImageLayout, ImageSubresourceRange},
query::{QueryControlFlags, QueryType},
render_pass::{Framebuffer, Subpass},
sync::{AccessCheckError, AccessFlags, PipelineMemoryAccess, PipelineStages},
sync::{future::AccessCheckError, AccessFlags, PipelineMemoryAccess, PipelineStages},
DeviceSize, OomError, RequirementNotMet, RequiresOneOf, VulkanObject,
};
use ahash::HashMap;

View File

@ -328,6 +328,7 @@ where
}
// VUID-vkCmdBindPipeline-pipeline-00781
// TODO:
Ok(())
}
@ -1218,7 +1219,7 @@ impl UnsafeCommandBufferBuilderBindVertexBuffer {
}
#[derive(Clone, Debug)]
enum BindPushError {
pub(in super::super) enum BindPushError {
DescriptorSetUpdateError(DescriptorSetUpdateError),
RequirementNotMet {

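The error enums in these command modules change from private to `pub(in super::super)`, so sibling modules created by the split can still name them while they remain invisible outside the command-buffer tree. A self-contained sketch of how this restricted visibility behaves (the module names here are hypothetical, not vulkano's):

mod command_buffer {
    pub mod commands {
        pub mod bind_push {
            // `super::super` is `command_buffer`: the enum is visible anywhere
            // inside that module tree, but private outside it.
            pub(in super::super) enum BindPushError {
                RequirementNotMet,
            }
        }
    }

    pub fn has_error() -> bool {
        // Code elsewhere in `command_buffer` can still construct and match it...
        let err = commands::bind_push::BindPushError::RequirementNotMet;
        matches!(err, commands::bind_push::BindPushError::RequirementNotMet)
    }
}

fn main() {
    // ...but `BindPushError` itself cannot be named out here.
    assert!(command_buffer::has_error());
}
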
View File

@ -2912,7 +2912,7 @@ impl UnsafeCommandBufferBuilder {
#[derive(Clone, Debug)]
#[allow(dead_code)]
enum SetDynamicStateError {
pub(in super::super) enum SetDynamicStateError {
RequirementNotMet {
required_for: &'static str,
requires_one_of: RequiresOneOf,

View File

@ -285,7 +285,7 @@ where
assert_eq!(device, query_pool.device());
// VUID-vkCmdWriteTimestamp2-stage-03860
if !queue_family_properties.supports_stage(stage) {
if !PipelineStages::from(queue_family_properties.queue_flags).contains_enum(stage) {
return Err(QueryError::StageNotSupported);
}

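`QueueFamilyProperties::supports_stage` is removed by this commit (see the device changes below); the check now goes through the `From<QueueFlags>` conversion for `PipelineStages` plus the generated `contains_enum` method. A sketch of the new pattern as a standalone helper, assuming the re-exports visible elsewhere in this diff:

use vulkano::{
    device::QueueFamilyProperties,
    sync::{PipelineStage, PipelineStages},
};

// Whether a queue family supports a pipeline stage: derive the supported
// stages from the family's queue flags, then test for the single stage.
fn supports_stage(properties: &QueueFamilyProperties, stage: PipelineStage) -> bool {
    PipelineStages::from(properties.queue_flags).contains_enum(stage)
}
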
View File

@ -552,88 +552,7 @@ where
&mut self,
mut rendering_info: RenderingInfo,
) -> Result<&mut Self, RenderPassError> {
{
let RenderingInfo {
render_area_offset,
ref mut render_area_extent,
ref mut layer_count,
view_mask,
ref color_attachments,
ref depth_attachment,
ref stencil_attachment,
contents: _,
_ne: _,
} = rendering_info;
let auto_extent = render_area_extent[0] == 0 || render_area_extent[1] == 0;
let auto_layers = *layer_count == 0;
// Set the values based on the attachment sizes.
if auto_extent || auto_layers {
if auto_extent {
*render_area_extent = [u32::MAX, u32::MAX];
}
if auto_layers {
if view_mask != 0 {
*layer_count = 1;
} else {
*layer_count = u32::MAX;
}
}
for image_view in (color_attachments.iter().flatten())
.chain(depth_attachment.iter())
.chain(stencil_attachment.iter())
.flat_map(|attachment_info| {
Some(&attachment_info.image_view).into_iter().chain(
attachment_info
.resolve_info
.as_ref()
.map(|resolve_info| &resolve_info.image_view),
)
})
{
if auto_extent {
let extent = image_view.dimensions().width_height();
for i in 0..2 {
render_area_extent[i] = min(render_area_extent[i], extent[i]);
}
}
if auto_layers {
let subresource_range = image_view.subresource_range();
let array_layers = subresource_range.array_layers.end
- subresource_range.array_layers.start;
*layer_count = min(*layer_count, array_layers);
}
}
if auto_extent {
if *render_area_extent == [u32::MAX, u32::MAX] {
return Err(RenderPassError::AutoExtentAttachmentsEmpty);
}
// Subtract the offset from the calculated max extent.
// If there is an underflow, then the offset is too large, and validation should
// catch that later.
for i in 0..2 {
render_area_extent[i] = render_area_extent[i]
.checked_sub(render_area_offset[i])
.unwrap_or(1);
}
}
if auto_layers {
if *layer_count == u32::MAX {
return Err(RenderPassError::AutoLayersAttachmentsEmpty);
}
}
}
}
rendering_info.set_extent_layers()?;
self.validate_begin_rendering(&mut rendering_info)?;
unsafe {
@ -2367,6 +2286,92 @@ impl Default for RenderingInfo {
}
}
impl RenderingInfo {
pub(crate) fn set_extent_layers(&mut self) -> Result<(), RenderPassError> {
let &mut RenderingInfo {
render_area_offset,
ref mut render_area_extent,
ref mut layer_count,
view_mask,
ref color_attachments,
ref depth_attachment,
ref stencil_attachment,
contents: _,
_ne: _,
} = self;
let auto_extent = render_area_extent[0] == 0 || render_area_extent[1] == 0;
let auto_layers = *layer_count == 0;
// Set the values based on the attachment sizes.
if auto_extent || auto_layers {
if auto_extent {
*render_area_extent = [u32::MAX, u32::MAX];
}
if auto_layers {
if view_mask != 0 {
*layer_count = 1;
} else {
*layer_count = u32::MAX;
}
}
for image_view in (color_attachments.iter().flatten())
.chain(depth_attachment.iter())
.chain(stencil_attachment.iter())
.flat_map(|attachment_info| {
Some(&attachment_info.image_view).into_iter().chain(
attachment_info
.resolve_info
.as_ref()
.map(|resolve_info| &resolve_info.image_view),
)
})
{
if auto_extent {
let extent = image_view.dimensions().width_height();
for i in 0..2 {
render_area_extent[i] = min(render_area_extent[i], extent[i]);
}
}
if auto_layers {
let subresource_range = image_view.subresource_range();
let array_layers =
subresource_range.array_layers.end - subresource_range.array_layers.start;
*layer_count = min(*layer_count, array_layers);
}
}
if auto_extent {
if *render_area_extent == [u32::MAX, u32::MAX] {
return Err(RenderPassError::AutoExtentAttachmentsEmpty);
}
// Subtract the offset from the calculated max extent.
// If there is an underflow, then the offset is too large, and validation should
// catch that later.
for i in 0..2 {
render_area_extent[i] = render_area_extent[i]
.checked_sub(render_area_offset[i])
.unwrap_or(1);
}
}
if auto_layers {
if *layer_count == u32::MAX {
return Err(RenderPassError::AutoLayersAttachmentsEmpty);
}
}
}
Ok(())
}
}
/// Parameters to specify properties of an attachment.
#[derive(Clone, Debug)]
pub struct RenderingAttachmentInfo {

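The extent/layer defaulting that used to be inlined in `begin_rendering` is now the shared `RenderingInfo::set_extent_layers`. What callers rely on is unchanged: leaving `render_area_extent` or `layer_count` at zero asks vulkano to derive them from the attachments. A hedged sketch of such a caller (the `ImageViewAbstract` trait object is assumed to be the attachment type this vulkano version uses):

use std::sync::Arc;
use vulkano::{
    command_buffer::{RenderingAttachmentInfo, RenderingInfo},
    image::view::ImageViewAbstract,
};

// With the zero defaults, the render area extent becomes the smallest
// attachment extent (minus the render area offset), and the layer count the
// smallest attachment layer range, or 1 when `view_mask` is nonzero.
fn auto_sized(view: Arc<dyn ImageViewAbstract>) -> RenderingInfo {
    RenderingInfo {
        color_attachments: vec![Some(RenderingAttachmentInfo::image_view(view))],
        ..Default::default()
    }
}
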
View File

@ -14,8 +14,8 @@ use crate::{
},
image::ImageLayout,
sync::{
BufferMemoryBarrier, DependencyInfo, Event, ImageMemoryBarrier, MemoryBarrier,
PipelineStages,
event::Event, AccessFlags, BufferMemoryBarrier, DependencyFlags, DependencyInfo,
ImageMemoryBarrier, MemoryBarrier, PipelineStages,
},
Version, VulkanObject,
};
@ -106,13 +106,15 @@ impl UnsafeCommandBufferBuilder {
}
let DependencyInfo {
mut dependency_flags,
memory_barriers,
buffer_memory_barriers,
image_memory_barriers,
_ne: _,
} = dependency_info;
let dependency_flags = ash::vk::DependencyFlags::BY_REGION;
// TODO: Is this needed?
dependency_flags |= DependencyFlags::BY_REGION;
if self.device.enabled_features().synchronization2 {
let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
@ -126,8 +128,8 @@ impl UnsafeCommandBufferBuilder {
_ne: _,
} = barrier;
debug_assert!(src_stages.supported_access().contains(src_access));
debug_assert!(dst_stages.supported_access().contains(dst_access));
debug_assert!(AccessFlags::from(src_stages).contains(src_access));
debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
ash::vk::MemoryBarrier2 {
src_stage_mask: src_stages.into(),
@ -147,30 +149,30 @@ impl UnsafeCommandBufferBuilder {
src_access,
dst_stages,
dst_access,
queue_family_transfer,
queue_family_ownership_transfer,
ref buffer,
ref range,
_ne: _,
} = barrier;
debug_assert!(src_stages.supported_access().contains(src_access));
debug_assert!(dst_stages.supported_access().contains(dst_access));
debug_assert!(AccessFlags::from(src_stages).contains(src_access));
debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
debug_assert!(!range.is_empty());
debug_assert!(range.end <= buffer.size());
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::BufferMemoryBarrier2 {
src_stage_mask: src_stages.into(),
src_access_mask: src_access.into(),
dst_stage_mask: dst_stages.into(),
dst_access_mask: dst_access.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
buffer: buffer.handle(),
offset: range.start,
size: range.end - range.start,
@ -189,14 +191,14 @@ impl UnsafeCommandBufferBuilder {
dst_access,
old_layout,
new_layout,
queue_family_transfer,
queue_family_ownership_transfer,
ref image,
ref subresource_range,
_ne: _,
} = barrier;
debug_assert!(src_stages.supported_access().contains(src_access));
debug_assert!(dst_stages.supported_access().contains(dst_access));
debug_assert!(AccessFlags::from(src_stages).contains(src_access));
debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
debug_assert!(!matches!(
new_layout,
ImageLayout::Undefined | ImageLayout::Preinitialized
@ -213,6 +215,12 @@ impl UnsafeCommandBufferBuilder {
subresource_range.array_layers.end <= image.dimensions().array_layers()
);
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::ImageMemoryBarrier2 {
src_stage_mask: src_stages.into(),
src_access_mask: src_access.into(),
@ -220,14 +228,8 @@ impl UnsafeCommandBufferBuilder {
dst_access_mask: dst_access.into(),
old_layout: old_layout.into(),
new_layout: new_layout.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
image: image.handle(),
subresource_range: subresource_range.clone().into(),
..Default::default()
@ -236,7 +238,7 @@ impl UnsafeCommandBufferBuilder {
.collect();
let dependency_info_vk = ash::vk::DependencyInfo {
dependency_flags,
dependency_flags: dependency_flags.into(),
memory_barrier_count: memory_barriers_vk.len() as u32,
p_memory_barriers: memory_barriers_vk.as_ptr(),
buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32,
@ -272,8 +274,8 @@ impl UnsafeCommandBufferBuilder {
_ne: _,
} = barrier;
debug_assert!(src_stages.supported_access().contains(src_access));
debug_assert!(dst_stages.supported_access().contains(dst_access));
debug_assert!(AccessFlags::from(src_stages).contains(src_access));
debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
src_stage_mask |= src_stages.into();
dst_stage_mask |= dst_stages.into();
@ -294,31 +296,31 @@ impl UnsafeCommandBufferBuilder {
src_access,
dst_stages,
dst_access,
queue_family_transfer,
queue_family_ownership_transfer,
ref buffer,
ref range,
_ne: _,
} = barrier;
debug_assert!(src_stages.supported_access().contains(src_access));
debug_assert!(dst_stages.supported_access().contains(dst_access));
debug_assert!(AccessFlags::from(src_stages).contains(src_access));
debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
debug_assert!(!range.is_empty());
debug_assert!(range.end <= buffer.size());
src_stage_mask |= src_stages.into();
dst_stage_mask |= dst_stages.into();
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::BufferMemoryBarrier {
src_access_mask: src_access.into(),
dst_access_mask: dst_access.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
buffer: buffer.handle(),
offset: range.start,
size: range.end - range.start,
@ -337,14 +339,14 @@ impl UnsafeCommandBufferBuilder {
dst_access,
old_layout,
new_layout,
queue_family_transfer,
queue_family_ownership_transfer,
ref image,
ref subresource_range,
_ne: _,
} = barrier;
debug_assert!(src_stages.supported_access().contains(src_access));
debug_assert!(dst_stages.supported_access().contains(dst_access));
debug_assert!(AccessFlags::from(src_stages).contains(src_access));
debug_assert!(AccessFlags::from(dst_stages).contains(dst_access));
debug_assert!(!matches!(
new_layout,
ImageLayout::Undefined | ImageLayout::Preinitialized
@ -364,19 +366,19 @@ impl UnsafeCommandBufferBuilder {
src_stage_mask |= src_stages.into();
dst_stage_mask |= dst_stages.into();
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::ImageMemoryBarrier {
src_access_mask: src_access.into(),
dst_access_mask: dst_access.into(),
old_layout: old_layout.into(),
new_layout: new_layout.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
image: image.handle(),
subresource_range: subresource_range.clone().into(),
..Default::default()
@ -401,7 +403,7 @@ impl UnsafeCommandBufferBuilder {
self.handle,
src_stage_mask,
dst_stage_mask,
dependency_flags,
dependency_flags.into(),
memory_barriers_vk.len() as u32,
memory_barriers_vk.as_ptr(),
buffer_memory_barriers_vk.len() as u32,
@ -415,14 +417,16 @@ impl UnsafeCommandBufferBuilder {
/// Calls `vkCmdSetEvent` on the builder.
#[inline]
pub unsafe fn set_event(&mut self, event: &Event, dependency_info: &DependencyInfo) {
let DependencyInfo {
memory_barriers,
buffer_memory_barriers,
image_memory_barriers,
let &DependencyInfo {
mut dependency_flags,
ref memory_barriers,
ref buffer_memory_barriers,
ref image_memory_barriers,
_ne: _,
} = dependency_info;
let dependency_flags = ash::vk::DependencyFlags::BY_REGION;
// TODO: Is this needed?
dependency_flags |= DependencyFlags::BY_REGION;
let fns = self.device.fns();
@ -456,25 +460,25 @@ impl UnsafeCommandBufferBuilder {
src_access,
dst_stages,
dst_access,
queue_family_transfer,
queue_family_ownership_transfer,
ref buffer,
ref range,
_ne: _,
} = barrier;
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::BufferMemoryBarrier2 {
src_stage_mask: src_stages.into(),
src_access_mask: src_access.into(),
dst_stage_mask: dst_stages.into(),
dst_access_mask: dst_access.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
buffer: buffer.handle(),
offset: range.start,
size: range.end - range.start,
@ -493,12 +497,18 @@ impl UnsafeCommandBufferBuilder {
dst_access,
old_layout,
new_layout,
queue_family_transfer,
queue_family_ownership_transfer,
ref image,
ref subresource_range,
_ne: _,
} = barrier;
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::ImageMemoryBarrier2 {
src_stage_mask: src_stages.into(),
src_access_mask: src_access.into(),
@ -506,14 +516,8 @@ impl UnsafeCommandBufferBuilder {
dst_access_mask: dst_access.into(),
old_layout: old_layout.into(),
new_layout: new_layout.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
image: image.handle(),
subresource_range: subresource_range.clone().into(),
..Default::default()
@ -522,7 +526,7 @@ impl UnsafeCommandBufferBuilder {
.collect();
let dependency_info_vk = ash::vk::DependencyInfo {
dependency_flags,
dependency_flags: dependency_flags.into(),
memory_barrier_count: memory_barriers_vk.len() as u32,
p_memory_barriers: memory_barriers_vk.as_ptr(),
buffer_memory_barrier_count: buffer_memory_barriers_vk.len() as u32,
@ -590,14 +594,16 @@ impl UnsafeCommandBufferBuilder {
let mut per_dependency_info_vk: SmallVec<[_; 4]> = SmallVec::new();
for (event, dependency_info) in events {
let DependencyInfo {
memory_barriers,
buffer_memory_barriers,
image_memory_barriers,
let &DependencyInfo {
mut dependency_flags,
ref memory_barriers,
ref buffer_memory_barriers,
ref image_memory_barriers,
_ne: _,
} = dependency_info;
let dependency_flags = ash::vk::DependencyFlags::BY_REGION;
// TODO: Is this needed?
dependency_flags |= DependencyFlags::BY_REGION;
let memory_barriers_vk: SmallVec<[_; 2]> = memory_barriers
.into_iter()
@ -628,25 +634,25 @@ impl UnsafeCommandBufferBuilder {
src_access,
dst_stages,
dst_access,
queue_family_transfer,
queue_family_ownership_transfer,
ref buffer,
ref range,
_ne: _,
} = barrier;
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::BufferMemoryBarrier2 {
src_stage_mask: src_stages.into(),
src_access_mask: src_access.into(),
dst_stage_mask: dst_stages.into(),
dst_access_mask: dst_access.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
buffer: buffer.handle(),
offset: range.start,
size: range.end - range.start,
@ -665,12 +671,18 @@ impl UnsafeCommandBufferBuilder {
dst_access,
old_layout,
new_layout,
queue_family_transfer,
queue_family_ownership_transfer,
ref image,
ref subresource_range,
_ne: _,
} = barrier;
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::ImageMemoryBarrier2 {
src_stage_mask: src_stages.into(),
src_access_mask: src_access.into(),
@ -678,14 +690,8 @@ impl UnsafeCommandBufferBuilder {
dst_access_mask: dst_access.into(),
old_layout: old_layout.into(),
new_layout: new_layout.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
image: image.handle(),
subresource_range: subresource_range.clone().into(),
..Default::default()
@ -695,7 +701,7 @@ impl UnsafeCommandBufferBuilder {
events_vk.push(event.handle());
dependency_infos_vk.push(ash::vk::DependencyInfo {
dependency_flags,
dependency_flags: dependency_flags.into(),
memory_barrier_count: 0,
p_memory_barriers: ptr::null(),
buffer_memory_barrier_count: 0,
@ -756,10 +762,11 @@ impl UnsafeCommandBufferBuilder {
for (event, dependency_info) in events {
let events_vk = [event.handle()];
let DependencyInfo {
memory_barriers,
buffer_memory_barriers,
image_memory_barriers,
let &DependencyInfo {
dependency_flags: _,
ref memory_barriers,
ref buffer_memory_barriers,
ref image_memory_barriers,
_ne: _,
} = dependency_info;
@ -796,7 +803,7 @@ impl UnsafeCommandBufferBuilder {
src_access,
dst_stages,
dst_access,
queue_family_transfer,
queue_family_ownership_transfer,
ref buffer,
ref range,
_ne: _,
@ -805,17 +812,17 @@ impl UnsafeCommandBufferBuilder {
src_stage_mask |= src_stages.into();
dst_stage_mask |= dst_stages.into();
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::BufferMemoryBarrier {
src_access_mask: src_access.into(),
dst_access_mask: dst_access.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
buffer: buffer.handle(),
offset: range.start,
size: range.end - range.start,
@ -834,7 +841,7 @@ impl UnsafeCommandBufferBuilder {
dst_access,
old_layout,
new_layout,
queue_family_transfer,
queue_family_ownership_transfer,
ref image,
ref subresource_range,
_ne: _,
@ -843,19 +850,19 @@ impl UnsafeCommandBufferBuilder {
src_stage_mask |= src_stages.into();
dst_stage_mask |= dst_stages.into();
let (src_queue_family_index, dst_queue_family_index) =
queue_family_ownership_transfer.map_or(
(ash::vk::QUEUE_FAMILY_IGNORED, ash::vk::QUEUE_FAMILY_IGNORED),
Into::into,
);
ash::vk::ImageMemoryBarrier {
src_access_mask: src_access.into(),
dst_access_mask: dst_access.into(),
old_layout: old_layout.into(),
new_layout: new_layout.into(),
src_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.source_index
}),
dst_queue_family_index: queue_family_transfer
.map_or(ash::vk::QUEUE_FAMILY_IGNORED, |transfer| {
transfer.destination_index
}),
src_queue_family_index,
dst_queue_family_index,
image: image.handle(),
subresource_range: subresource_range.clone().into(),
..Default::default()

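Throughout this file, the pairs of hand-written `map_or` closures over the old `queue_family_transfer` collapse into a single `map_or((IGNORED, IGNORED), Into::into)`, because the renamed `QueueFamilyOwnershipTransfer` converts directly into the `(src, dst)` index pair. A simplified, self-contained sketch of why that works (the single variant shown is illustrative, not the commit's full enum):

// Stand-in for ash's VK_QUEUE_FAMILY_IGNORED.
const QUEUE_FAMILY_IGNORED: u32 = !0;

enum QueueFamilyOwnershipTransfer {
    // Ownership transfer between two queue families on the same device.
    ExclusiveBetweenLocal { src_index: u32, dst_index: u32 },
}

impl From<QueueFamilyOwnershipTransfer> for (u32, u32) {
    fn from(val: QueueFamilyOwnershipTransfer) -> Self {
        match val {
            QueueFamilyOwnershipTransfer::ExclusiveBetweenLocal { src_index, dst_index } => {
                (src_index, dst_index)
            }
        }
    }
}

fn main() {
    let transfer = Some(QueueFamilyOwnershipTransfer::ExclusiveBetweenLocal {
        src_index: 0,
        dst_index: 1,
    });
    // `None` means no ownership transfer: both indices stay "ignored".
    let (src, dst) =
        transfer.map_or((QUEUE_FAMILY_IGNORED, QUEUE_FAMILY_IGNORED), Into::into);
    assert_eq!((src, dst), (0, 1));
}
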
View File

@ -127,7 +127,7 @@ use crate::{
query::{QueryControlFlags, QueryPipelineStatisticFlags},
range_map::RangeMap,
render_pass::{Framebuffer, Subpass},
sync::{AccessFlags, PipelineStages, Semaphore},
sync::{semaphore::Semaphore, AccessFlags, PipelineStages},
DeviceSize,
};
use bytemuck::{Pod, Zeroable};

View File

@ -205,7 +205,7 @@ impl SyncCommandBufferBuilder {
ref range,
ref memory,
} => {
debug_assert!(memory.stages.supported_access().contains(memory.access));
debug_assert!(AccessFlags::from(memory.stages).contains(memory.access));
if let Some(conflicting_use) =
self.find_buffer_conflict(buffer, range.clone(), memory)
@ -226,7 +226,7 @@ impl SyncCommandBufferBuilder {
end_layout,
} => {
debug_assert!(memory.exclusive || start_layout == end_layout);
debug_assert!(memory.stages.supported_access().contains(memory.access));
debug_assert!(AccessFlags::from(memory.stages).contains(memory.access));
debug_assert!(end_layout != ImageLayout::Undefined);
debug_assert!(end_layout != ImageLayout::Preinitialized);

View File

@ -78,7 +78,10 @@ use crate::{
buffer::{sys::Buffer, BufferAccess},
device::{Device, DeviceOwned, Queue},
image::{sys::Image, ImageAccess, ImageLayout, ImageSubresourceRange},
sync::{AccessCheckError, AccessError, AccessFlags, PipelineMemoryAccess, PipelineStages},
sync::{
future::{AccessCheckError, AccessError},
AccessFlags, PipelineMemoryAccess, PipelineStages,
},
DeviceSize,
};
use ahash::HashMap;

View File

@ -17,8 +17,10 @@ use crate::{
image::{sys::Image, ImageAccess, ImageLayout, ImageSubresourceRange},
swapchain::Swapchain,
sync::{
now, AccessCheckError, AccessError, AccessFlags, FlushError, GpuFuture, NowFuture,
PipelineMemoryAccess, PipelineStages, SubmitAnyBuilder,
future::{
now, AccessCheckError, AccessError, FlushError, GpuFuture, NowFuture, SubmitAnyBuilder,
},
AccessFlags, PipelineMemoryAccess, PipelineStages,
},
DeviceSize, SafeDeref, VulkanObject,
};

View File

@ -25,8 +25,8 @@ use crate::{
SurfaceInfo, SurfaceTransforms,
},
sync::{
ExternalFenceInfo, ExternalFenceProperties, ExternalSemaphoreInfo,
ExternalSemaphoreProperties,
fence::{ExternalFenceInfo, ExternalFenceProperties},
semaphore::{ExternalSemaphoreInfo, ExternalSemaphoreProperties},
},
ExtensionProperties, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
};

View File

@ -22,7 +22,9 @@ use crate::{
},
swapchain::{PresentInfo, SwapchainPresentInfo},
sync::{
AccessCheckError, Fence, FenceState, FlushError, GpuFuture, PipelineStage, SemaphoreState,
fence::{Fence, FenceState},
future::{AccessCheckError, FlushError, GpuFuture},
semaphore::SemaphoreState,
},
OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
};
@ -1588,14 +1590,6 @@ pub struct QueueFamilyProperties {
pub min_image_transfer_granularity: [u32; 3],
}
impl QueueFamilyProperties {
/// Returns whether the queues of this family support a particular pipeline stage.
#[inline]
pub fn supports_stage(&self, stage: PipelineStage) -> bool {
self.queue_flags.contains(stage.required_queue_flags())
}
}
impl From<ash::vk::QueueFamilyProperties> for QueueFamilyProperties {
#[inline]
fn from(val: ash::vk::QueueFamilyProperties) -> Self {
@ -1645,6 +1639,13 @@ vulkan_bitflags! {
VIDEO_ENCODE = VIDEO_ENCODE_KHR {
device_extensions: [khr_video_encode_queue],
},
/*
/// Queues of this family can execute optical flow operations.
OPTICAL_FLOW = OPTICAL_FLOW_NV {
device_extensions: [nv_optical_flow],
},
*/
}
/// Error that can happen when submitting work to a queue.
@ -1700,7 +1701,7 @@ impl From<RequirementNotMet> for QueueError {
#[cfg(test)]
mod tests {
use crate::sync::Fence;
use crate::sync::fence::Fence;
use std::{sync::Arc, time::Duration};
#[test]

View File

@ -34,7 +34,7 @@ use crate::{
},
range_map::RangeMap,
swapchain::Swapchain,
sync::{AccessError, CurrentAccess, Sharing},
sync::{future::AccessError, CurrentAccess, Sharing},
DeviceSize, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
};
use parking_lot::{Mutex, MutexGuard};

View File

@ -99,7 +99,7 @@ use crate::{
buffer::{sys::RawBuffer, BufferAccess},
image::{sys::RawImage, ImageAccess, ImageAspects},
macros::vulkan_bitflags,
sync::Semaphore,
sync::semaphore::Semaphore,
DeviceSize,
};
use std::{num::NonZeroU64, sync::Arc};

View File

@ -15,7 +15,7 @@ use crate::{
device::Device,
format::FormatFeatures,
image::{ImageAspects, ImageLayout, SampleCount},
sync::PipelineStages,
sync::{AccessFlags, DependencyFlags, PipelineStages},
OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
};
use smallvec::SmallVec;
@ -633,8 +633,8 @@ impl RenderPass {
dst_stages,
src_access,
dst_access,
by_region,
view_local,
dependency_flags,
view_offset,
_ne: _,
} = dependency;
let dependency_num = dependency_num as u32;
@ -875,7 +875,7 @@ impl RenderPass {
// VUID-VkSubpassDependency2-srcAccessMask-03088
// VUID-VkSubpassDependency2-dstAccessMask-03089
if !stages.supported_access().contains(access) {
if !AccessFlags::from(stages).contains(access) {
return Err(
RenderPassCreationError::DependencyAccessNotSupportedByStages {
dependency: dependency_num,
@ -884,13 +884,24 @@ impl RenderPass {
}
}
// VUID-VkRenderPassCreateInfo2-viewMask-03059
if view_local.is_some() && !is_multiview {
return Err(
RenderPassCreationError::DependencyViewLocalMultiviewNotEnabled {
dependency: dependency_num,
},
);
if dependency_flags.intersects(DependencyFlags::VIEW_LOCAL) {
// VUID-VkRenderPassCreateInfo2-viewMask-03059
if !is_multiview {
return Err(
RenderPassCreationError::DependencyViewLocalMultiviewNotEnabled {
dependency: dependency_num,
},
);
}
} else {
// VUID-VkSubpassDependency2-dependencyFlags-03092
if view_offset != 0 {
return Err(
RenderPassCreationError::DependencyViewOffsetNonzeroWithoutViewLocal {
dependency: dependency_num,
},
);
}
}
// VUID-VkSubpassDependency2-srcSubpass-03085
@ -937,7 +948,7 @@ impl RenderPass {
} else {
// VUID-VkSubpassDependency2-dependencyFlags-03090
// VUID-VkSubpassDependency2-dependencyFlags-03091
if view_local.is_some() {
if dependency_flags.intersects(DependencyFlags::VIEW_LOCAL) {
return Err(
RenderPassCreationError::DependencyViewLocalExternalDependency {
dependency: dependency_num,
@ -977,7 +988,7 @@ impl RenderPass {
// VUID-VkSubpassDependency2-srcSubpass-02245
if src_stages.intersects(framebuffer_stages)
&& dst_stages.intersects(framebuffer_stages)
&& !by_region
&& !dependency_flags.intersects(DependencyFlags::BY_REGION)
{
return Err(
RenderPassCreationError::DependencySelfDependencyFramebufferStagesWithoutByRegion {
@ -986,11 +997,11 @@ impl RenderPass {
);
}
if let Some(view_offset) = view_local {
if dependency_flags.intersects(DependencyFlags::VIEW_LOCAL) {
// VUID-VkSubpassDependency2-viewOffset-02530
if view_offset != 0 {
return Err(
RenderPassCreationError::DependencySelfDependencyViewLocalNonzeroOffset {
RenderPassCreationError::DependencySelfDependencyViewLocalNonzeroViewOffset {
dependency: dependency_num,
},
);
@ -1172,16 +1183,6 @@ impl RenderPass {
.iter()
.enumerate()
.map(|(index, dependency)| {
let mut dependency_flags = ash::vk::DependencyFlags::empty();
if dependency.by_region {
dependency_flags |= ash::vk::DependencyFlags::BY_REGION;
}
if dependency.view_local.is_some() {
dependency_flags |= ash::vk::DependencyFlags::VIEW_LOCAL;
}
ash::vk::SubpassDependency2 {
p_next: memory_barriers_vk
.get(index)
@ -1192,9 +1193,9 @@ impl RenderPass {
dst_stage_mask: dependency.dst_stages.into(),
src_access_mask: dependency.src_access.into(),
dst_access_mask: dependency.dst_access.into(),
dependency_flags,
dependency_flags: dependency.dependency_flags.into(),
// VUID-VkSubpassDependency2-dependencyFlags-03092
view_offset: dependency.view_local.unwrap_or(0),
view_offset: dependency.view_offset,
..Default::default()
}
})
@ -1371,11 +1372,7 @@ impl RenderPass {
dst_stage_mask: dependency.dst_stages.into(),
src_access_mask: dependency.src_access.into(),
dst_access_mask: dependency.dst_access.into(),
dependency_flags: if dependency.by_region {
ash::vk::DependencyFlags::BY_REGION
} else {
ash::vk::DependencyFlags::empty()
},
dependency_flags: dependency.dependency_flags.into(),
})
.collect::<SmallVec<[_; 4]>>();
@ -1427,7 +1424,7 @@ impl RenderPass {
subpasses.iter().map(|subpass| subpass.view_mask).collect(),
dependencies
.iter()
.map(|dependency| dependency.view_local.unwrap_or(0))
.map(|dependency| dependency.view_offset)
.collect(),
)
} else {
@ -1549,7 +1546,7 @@ pub enum RenderPassCreationError {
/// A subpass dependency specifies a subpass self-dependency and `dependency_flags` contains
/// `VIEW_LOCAL`, but `view_offset` was not 0.
DependencySelfDependencyViewLocalNonzeroOffset { dependency: u32 },
DependencySelfDependencyViewLocalNonzeroViewOffset { dependency: u32 },
/// A subpass dependency specifies a subpass self-dependency without the `view_local`
/// dependency, but the referenced subpass has more than one bit set in its `view_mask`.
@ -1566,14 +1563,24 @@ pub enum RenderPassCreationError {
/// render pass.
DependencySubpassOutOfRange { dependency: u32, subpass: u32 },
/// A subpass dependency has the `view_local` dependency enabled, but `src_subpass` or
/// In a subpass dependency, `dependency_flags` contains [`VIEW_LOCAL`], but `src_subpass` or
/// `dst_subpass` were set to `None`.
///
/// [`VIEW_LOCAL`]: crate::sync::DependencyFlags::VIEW_LOCAL
DependencyViewLocalExternalDependency { dependency: u32 },
/// A subpass dependency has the `view_local` dependency enabled, but multiview is not enabled
/// on the render pass.
/// In a subpass dependency, `dependency_flags` contains [`VIEW_LOCAL`], but multiview is not
/// enabled on the render pass.
///
/// [`VIEW_LOCAL`]: crate::sync::DependencyFlags::VIEW_LOCAL
DependencyViewLocalMultiviewNotEnabled { dependency: u32 },
/// In a subpass dependency, `view_offset` is not zero, but `dependency_flags` does not contain
/// [`VIEW_LOCAL`].
///
/// [`VIEW_LOCAL`]: crate::sync::DependencyFlags::VIEW_LOCAL
DependencyViewOffsetNonzeroWithoutViewLocal { dependency: u32 },
/// A reference to an attachment used other than as an input attachment in a subpass has
/// one or more aspects selected.
SubpassAttachmentAspectsNotEmpty { subpass: u32, attachment: u32 },
@ -1740,7 +1747,7 @@ impl Display for RenderPassCreationError {
dependency,
)
}
Self::DependencySelfDependencyViewLocalNonzeroOffset { dependency } => write!(
Self::DependencySelfDependencyViewLocalNonzeroViewOffset { dependency } => write!(
f,
"subpass dependency {} specifies a subpass self-dependency and `dependency_flags` \
contains `VIEW_LOCAL`, but `view_offset` was not 0",
@ -1785,14 +1792,20 @@ impl Display for RenderPassCreationError {
),
Self::DependencyViewLocalExternalDependency { dependency } => write!(
f,
"subpass dependency {} has the `view_local` dependency enabled, but \
"in subpass dependency {}, `dependency_flags` contains `VIEW_LOCAL`, but \
`src_subpass` or `dst_subpass` were set to `None`",
dependency,
),
Self::DependencyViewLocalMultiviewNotEnabled { dependency } => write!(
f,
"subpass dependency {} has the `view_local` dependency enabled, but multiview is \
not enabled on the render pass",
"in subpass dependency {}, `dependency_flags` contains `VIEW_LOCAL`, but \
multiview is not enabled on the render pass",
dependency,
),
Self::DependencyViewOffsetNonzeroWithoutViewLocal { dependency } => write!(
f,
"in subpass dependency {}, `view_offset` is not zero, but `dependency_flags` does \
not contain `VIEW_LOCAL`",
dependency,
),
Self::SubpassAttachmentAspectsNotEmpty {

View File

@ -168,7 +168,8 @@ macro_rules! ordered_passes_renderpass {
dst_stages,
src_access,
dst_access,
by_region: true, // TODO: correct values
// TODO: correct values
dependency_flags: $crate::sync::DependencyFlags::BY_REGION,
..Default::default()
}
})

View File

@ -35,7 +35,7 @@ use crate::{
image::{ImageAspects, ImageLayout, SampleCount},
macros::{vulkan_bitflags_enum, vulkan_enum},
shader::ShaderInterface,
sync::{AccessFlags, PipelineStages},
sync::{AccessFlags, DependencyFlags, PipelineStages},
Version, VulkanObject,
};
use std::{cmp::max, mem::MaybeUninit, num::NonZeroU64, ptr, sync::Arc};
@ -1037,34 +1037,33 @@ pub struct SubpassDependency {
/// The default value is [`AccessFlags::empty()`].
pub dst_access: AccessFlags,
/// If false, then the source operations must be fully finished for the destination operations
/// to start. If true, then the implementation can start the destination operation for some
/// given pixels as long as the source operation is finished for these given pixels.
/// Dependency flags that modify behavior of the subpass dependency.
///
/// In other words, if the previous subpass has some side effects on other parts of an
/// attachment, then you should set it to false.
/// If `src_subpass` equals `dst_subpass`, then:
/// - If `src_stages` and `dst_stages` both contain framebuffer-space stages,
/// this must include [`BY_REGION`].
/// - If the subpass's `view_mask` has more than one view,
/// this must include [`VIEW_LOCAL`].
///
/// Passing `false` is always safer than passing `true`, but in practice you rarely need to
/// pass `false`.
/// The default value is [`DependencyFlags::empty()`].
///
/// The default value is `false`.
pub by_region: bool,
/// [`BY_REGION`]: crate::sync::DependencyFlags::BY_REGION
/// [`VIEW_LOCAL`]: crate::sync::DependencyFlags::VIEW_LOCAL
pub dependency_flags: DependencyFlags,
/// If multiview rendering is being used (the subpasses have a nonzero `view_mask`), then
/// setting this to `Some` creates a view-local dependency, between views in `src_subpass`
/// and views in `dst_subpass`.
///
/// The inner value specifies an offset relative to the view index of `dst_subpass`:
/// each view `d` in `dst_subpass` depends on view `d + view_offset` in
/// If multiview rendering is being used (the subpasses have a nonzero `view_mask`), and
/// `dependency_flags` includes [`VIEW_LOCAL`], this specifies an offset relative to the view
/// index of `dst_subpass`: each view `d` in `dst_subpass` depends on view `d + view_offset` in
/// `src_subpass`. If the source view index does not exist, the dependency is ignored for
/// that view.
///
/// If multiview rendering is not being used, the value must be `None`. If `src_subpass`
/// and `dst_subpass` are the same, only `Some(0)` and `None` are allowed as values, and
/// if that subpass also has multiple bits set in its `view_mask`, the value must be `Some(0)`.
/// If `dependency_flags` does not include [`VIEW_LOCAL`], or if `src_subpass` and
/// `dst_subpass` are the same, the value must be `0`.
///
/// The default value is `None`.
pub view_local: Option<i32>,
/// The default value is `0`.
///
/// [`VIEW_LOCAL`]: crate::sync::DependencyFlags::VIEW_LOCAL
pub view_offset: i32,
pub _ne: crate::NonExhaustive,
}
@ -1079,8 +1078,8 @@ impl Default for SubpassDependency {
dst_stages: PipelineStages::empty(),
src_access: AccessFlags::empty(),
dst_access: AccessFlags::empty(),
by_region: false,
view_local: None,
dependency_flags: DependencyFlags::empty(),
view_offset: 0,
_ne: crate::NonExhaustive(()),
}
}

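With `by_region` and `view_local` folded into `dependency_flags` plus a plain `view_offset`, a subpass dependency is now spelled like the following sketch (the stage and access values are illustrative):

use vulkano::{
    render_pass::SubpassDependency,
    sync::{AccessFlags, DependencyFlags, PipelineStages},
};

// The old `by_region: true, view_local: Some(0)` pair becomes two flags plus
// an explicit offset of zero.
fn example_dependency() -> SubpassDependency {
    SubpassDependency {
        src_subpass: Some(0),
        dst_subpass: Some(1),
        src_stages: PipelineStages::COLOR_ATTACHMENT_OUTPUT,
        dst_stages: PipelineStages::FRAGMENT_SHADER,
        src_access: AccessFlags::COLOR_ATTACHMENT_WRITE,
        dst_access: AccessFlags::INPUT_ATTACHMENT_READ,
        dependency_flags: DependencyFlags::BY_REGION | DependencyFlags::VIEW_LOCAL,
        view_offset: 0,
        ..Default::default()
    }
}
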
View File

@ -337,7 +337,7 @@ pub use self::{
#[cfg(target_os = "ios")]
pub use surface::IOSMetalLayer;
use crate::sync::Semaphore;
use crate::sync::semaphore::Semaphore;
use std::{
num::NonZeroU64,
sync::{atomic::AtomicBool, Arc},

View File

@ -22,8 +22,10 @@ use crate::{
macros::vulkan_enum,
swapchain::{PresentInfo, SurfaceApi, SurfaceInfo, SurfaceSwapchainLock},
sync::{
AccessCheckError, AccessError, AccessFlags, Fence, FenceError, FlushError, GpuFuture,
PipelineStages, Semaphore, SemaphoreError, Sharing, SubmitAnyBuilder,
fence::{Fence, FenceError},
future::{AccessCheckError, AccessError, FlushError, GpuFuture, SubmitAnyBuilder},
semaphore::{Semaphore, SemaphoreError},
AccessFlags, PipelineStages, Sharing,
},
DeviceSize, OomError, RequirementNotMet, RequiresOneOf, VulkanError, VulkanObject,
};

View File

@ -7,6 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! An event provides fine-grained synchronization within a single queue, or from the host to a
//! queue.
use crate::{
device::{Device, DeviceOwned},
OomError, RequiresOneOf, VulkanError, VulkanObject,
@ -296,7 +299,7 @@ impl From<VulkanError> for EventError {
#[cfg(test)]
mod tests {
use crate::{sync::Event, VulkanObject};
use crate::{sync::event::Event, VulkanObject};
#[test]
fn event_create() {

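For reference alongside the new module doc: host-side use of an event looks roughly like this sketch (a `device: Arc<Device>` is assumed, and the `new`/`set`/`signaled` method names are what this version of vulkano appears to expose):

use std::sync::Arc;
use vulkano::{
    device::Device,
    sync::event::{Event, EventCreateInfo},
};

// Create an event in the unsignaled state, signal it from the host, and
// read its status back.
fn signal_event(device: Arc<Device>) -> bool {
    let mut event = Event::new(device, EventCreateInfo::default()).unwrap();
    event.set().unwrap();
    event.signaled().unwrap()
}
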
View File

@ -7,6 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! A fence provides synchronization between the device and the host, or between an external source
//! and the host.
use crate::{
device::{Device, DeviceOwned, Queue},
macros::{vulkan_bitflags, vulkan_bitflags_enum},
@ -1608,7 +1611,7 @@ impl From<RequirementNotMet> for FenceError {
#[cfg(test)]
mod tests {
use crate::{
sync::{fence::FenceCreateInfo, Fence},
sync::fence::{Fence, FenceCreateInfo},
VulkanObject,
};
use std::time::Duration;

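Likewise for fences, the basic host-side pattern, sketched under the same assumptions (a `device: Arc<Device>` already at hand):

use std::{sync::Arc, time::Duration};
use vulkano::{
    device::Device,
    sync::fence::{Fence, FenceCreateInfo},
};

// Create a fence that starts out signaled; waiting on it then returns
// immediately instead of timing out.
fn wait_presignaled(device: Arc<Device>) {
    let fence = Fence::new(
        device,
        FenceCreateInfo {
            signaled: true,
            ..Default::default()
        },
    )
    .unwrap();
    fence.wait(Some(Duration::from_millis(100))).unwrap();
}
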
View File

@ -14,7 +14,11 @@ use crate::{
device::{Device, DeviceOwned, Queue, QueueFlags},
image::{sys::Image, ImageLayout},
swapchain::Swapchain,
sync::{AccessError, AccessFlags, Fence, PipelineStages, SubmitAnyBuilder},
sync::{
fence::Fence,
future::{AccessError, SubmitAnyBuilder},
AccessFlags, PipelineStages,
},
DeviceSize, OomError,
};
use parking_lot::{Mutex, MutexGuard};

View File

@ -7,13 +7,100 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Represents an event that will happen on the GPU in the future.
//!
//! Whenever you ask the GPU to start an operation by using a function of the vulkano library (for
//! example executing a command buffer), this function will return a *future*. A future is an
//! object that implements [the `GpuFuture` trait](crate::sync::GpuFuture) and that represents the
//! point in time when this operation is over.
//!
//! No function in vulkano immediately sends an operation to the GPU (with the exception of some
//! unsafe low-level functions). Instead they return a future that is in the pending state. Before
//! the GPU actually starts doing anything, you have to *flush* the future by calling the `flush()`
//! method or one of its derivatives.
//!
//! Futures serve several roles:
//!
//! - Futures can be used to build dependencies between operations, making it possible to ask
//! that an operation starts only after a previous operation is finished.
//! - Submitting an operation to the GPU is a costly operation. By chaining multiple operations
//! with futures you will submit them all at once instead of one by one, thereby reducing this
//! cost.
//! - Futures keep alive the resources and objects used by the GPU so that they don't get destroyed
//! while they are still in use.
//!
//! The last point means that you should keep futures alive in your program for as long as their
//! corresponding operation is potentially still being executed by the GPU. Dropping a future
//! earlier will block the current thread (after flushing, if necessary) until the GPU has finished
//! the operation, which is usually not what you want.
//!
//! If you write a function that submits an operation to the GPU in your program, you are
//! encouraged to let this function return the corresponding future and let the caller handle it.
//! This way the caller will be able to chain multiple futures together and decide when it wants to
//! keep the future alive or drop it.
//!
//! # Executing an operation after a future
//!
//! Respecting the order of operations on the GPU is important, as it is what *proves* to vulkano that
//! what you are doing is indeed safe. For example if you submit two operations that modify the
//! same buffer, then you need to execute one after the other instead of submitting them
//! independently. Failing to do so would mean that these two operations could potentially execute
//! simultaneously on the GPU, which would be unsafe.
//!
//! This is done by calling one of the methods of the `GpuFuture` trait. For example calling
//! `prev_future.then_execute(command_buffer)` takes ownership of `prev_future` and will make sure
//! to only start executing `command_buffer` after the moment corresponding to `prev_future`
//! happens. The object returned by the `then_execute` function is itself a future that corresponds
//! to the moment when the execution of `command_buffer` ends.
//!
//! ## Between two different GPU queues
//!
//! When you want to perform an operation after another operation on two different queues, you
//! **must** put a *semaphore* between them. Failure to do so would result in a runtime error.
//! Adding a semaphore is as simple as replacing `prev_future.then_execute(...)` with
//! `prev_future.then_signal_semaphore().then_execute(...)`.
//!
//! > **Note**: A common use-case is using a transfer queue (i.e. a queue that is only capable of
//! > performing transfer operations) to write data to a buffer, then read that data from the
//! > rendering queue.
//!
//! What happens when you do so is that the first queue will execute the first set of operations
//! (represented by `prev_future` in the example), then put a semaphore in the signalled state.
//! Meanwhile the second queue blocks (if necessary) until that same semaphore gets signalled, and
//! only then will it execute the second set of operations.
//!
//! Since you want to avoid blocking the second queue as much as possible, you probably want to
//! flush the operation to the first queue as soon as possible. This can easily be done by calling
//! `then_signal_semaphore_and_flush()` instead of `then_signal_semaphore()`.
//!
//! ## Between several different GPU queues
//!
//! The `then_signal_semaphore()` method is appropriate when you perform an operation in one queue,
//! and want to see the result in another queue. However in some situations you want to start
//! multiple operations on several different queues.
//!
//! TODO: this is not yet implemented
//!
//! # Fences
//!
//! A `Fence` is an object that is used to signal the CPU when an operation on the GPU is finished.
//!
//! Signalling a fence is done by calling `then_signal_fence()` on a future. Just like semaphores,
//! you are encouraged to use `then_signal_fence_and_flush()` instead.
//!
//! Signalling a fence is kind of a "terminator" to a chain of futures.
pub use self::{
fence_signal::{FenceSignalFuture, FenceSignalFutureBehavior},
join::JoinFuture,
now::{now, NowFuture},
semaphore_signal::SemaphoreSignalFuture,
};
use super::{AccessFlags, Fence, FenceError, PipelineStages, Semaphore};
use super::{
fence::{Fence, FenceError},
semaphore::Semaphore,
AccessFlags, PipelineStages,
};
use crate::{
buffer::sys::Buffer,
command_buffer::{

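The moved doc block above describes the chaining model in prose; in code, a typical chain built from these re-exports looks like the following sketch (`PrimaryCommandBufferAbstract` is assumed to be the command-buffer trait bound in this vulkano version, and errors are unwrapped for brevity):

use std::sync::Arc;
use vulkano::{
    command_buffer::PrimaryCommandBufferAbstract,
    device::{Device, Queue},
    sync::{self, GpuFuture},
};

// Build and flush a chain: empty "now" future -> command buffer execution
// -> fence signal, submitted to the GPU in one go.
fn run(
    device: Arc<Device>,
    queue: Arc<Queue>,
    command_buffer: impl PrimaryCommandBufferAbstract + 'static,
) {
    let future = sync::now(device)
        .then_execute(queue, command_buffer)
        .unwrap()
        .then_signal_fence_and_flush()
        .unwrap();

    // Block the host until the GPU has finished (`None` = wait indefinitely).
    future.wait(None).unwrap();
}
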
View File

@ -14,7 +14,7 @@ use crate::{
device::{Device, DeviceOwned, Queue},
image::{sys::Image, ImageLayout},
swapchain::Swapchain,
sync::{AccessError, AccessFlags, PipelineStages, Semaphore},
sync::{future::AccessError, semaphore::Semaphore, AccessFlags, PipelineStages},
DeviceSize,
};
use parking_lot::Mutex;

View File

@ -15,127 +15,23 @@
//!
//! This safety is enforced at runtime by vulkano, but it is not magic: you will need some
//! knowledge if you want to avoid errors.
//!
//! # Futures
//!
//! Whenever you ask the GPU to start an operation by using a function of the vulkano library (for
//! example executing a command buffer), this function will return a *future*. A future is an
//! object that implements [the `GpuFuture` trait](crate::sync::GpuFuture) and that represents the
//! point in time when this operation is over.
//!
//! No function in vulkano immediately sends an operation to the GPU (with the exception of some
//! unsafe low-level functions). Instead they return a future that is in the pending state. Before
//! the GPU actually starts doing anything, you have to *flush* the future by calling the `flush()`
//! method or one of its derivatives.
//!
//! Futures serve several roles:
//!
//! - Futures can be used to build dependencies between operations, making it possible to ask
//! that an operation starts only after a previous operation is finished.
//! - Submitting an operation to the GPU is a costly operation. By chaining multiple operations
//! with futures you will submit them all at once instead of one by one, thereby reducing this
//! cost.
//! - Futures keep alive the resources and objects used by the GPU so that they don't get destroyed
//! while they are still in use.
//!
//! The last point means that you should keep futures alive in your program for as long as their
//! corresponding operation is potentially still being executed by the GPU. Dropping a future
//! earlier will block the current thread (after flushing, if necessary) until the GPU has finished
//! the operation, which is usually not what you want.
//!
//! If you write a function that submits an operation to the GPU in your program, you are
//! encouraged to let this function return the corresponding future and let the caller handle it.
//! This way the caller will be able to chain multiple futures together and decide when it wants to
//! keep the future alive or drop it.
//!
//! # Executing an operation after a future
//!
//! Respecting the order of operations on the GPU is important, as it is what *proves* to vulkano that
//! what you are doing is indeed safe. For example if you submit two operations that modify the
//! same buffer, then you need to execute one after the other instead of submitting them
//! independently. Failing to do so would mean that these two operations could potentially execute
//! simultaneously on the GPU, which would be unsafe.
//!
//! This is done by calling one of the methods of the `GpuFuture` trait. For example calling
//! `prev_future.then_execute(command_buffer)` takes ownership of `prev_future` and will make sure
//! to only start executing `command_buffer` after the moment corresponding to `prev_future`
//! happens. The object returned by the `then_execute` function is itself a future that corresponds
//! to the moment when the execution of `command_buffer` ends.
//!
//! ## Between two different GPU queues
//!
//! When you want to perform an operation after another operation on two different queues, you
//! **must** put a *semaphore* between them. Failure to do so would result in a runtime error.
//! Adding a semaphore is as simple as replacing `prev_future.then_execute(...)` with
//! `prev_future.then_signal_semaphore().then_execute(...)`.
//!
//! > **Note**: A common use-case is using a transfer queue (i.e. a queue that is only capable of
//! > performing transfer operations) to write data to a buffer, then read that data from the
//! > rendering queue.
//!
//! What happens when you do so is that the first queue will execute the first set of operations
//! (represented by `prev_future` in the example), then put a semaphore in the signalled state.
//! Meanwhile the second queue blocks (if necessary) until that same semaphore gets signalled, and
//! only then will it execute the second set of operations.
//!
//! Since you want to avoid blocking the second queue as much as possible, you probably want to
//! flush the operation to the first queue as soon as possible. This can easily be done by calling
//! `then_signal_semaphore_and_flush()` instead of `then_signal_semaphore()`.
//!
//! ## Between several different GPU queues
//!
//! The `then_signal_semaphore()` method is appropriate when you perform an operation in one queue,
//! and want to see the result in another queue. However in some situations you want to start
//! multiple operations on several different queues.
//!
//! TODO: this is not yet implemented
//!
//! # Fences
//!
//! A `Fence` is an object that is used to signal the CPU when an operation on the GPU is finished.
//!
//! Signalling a fence is done by calling `then_signal_fence()` on a future. Just like semaphores,
//! you are encouraged to use `then_signal_fence_and_flush()` instead.
//!
//! Signalling a fence is kind of a "terminator" to a chain of futures.
//!
//! TODO: lots of problems with how to use fences
//! TODO: talk about fence + semaphore simultaneously
//! TODO: talk about using fences to clean up
#[cfg(unix)]
pub use self::fence::ImportFenceFdInfo;
#[cfg(windows)]
pub use self::fence::ImportFenceWin32HandleInfo;
pub use self::{
event::{Event, EventCreateInfo},
fence::{
ExternalFenceHandleType, ExternalFenceHandleTypes, ExternalFenceInfo,
ExternalFenceProperties, Fence, FenceCreateInfo, FenceError, FenceImportFlags,
},
future::{
now, AccessCheckError, AccessError, FenceSignalFuture, FlushError, GpuFuture, JoinFuture,
NowFuture, SemaphoreSignalFuture, SubmitAnyBuilder,
},
future::{now, FlushError, GpuFuture},
pipeline::{
AccessFlags, BufferMemoryBarrier, DependencyInfo, ImageMemoryBarrier, MemoryBarrier,
PipelineMemoryAccess, PipelineStage, PipelineStages, QueueFamilyTransfer,
},
semaphore::{
ExternalSemaphoreHandleType, ExternalSemaphoreHandleTypes, ExternalSemaphoreInfo,
ExternalSemaphoreProperties, Semaphore, SemaphoreCreateInfo, SemaphoreError,
SemaphoreImportFlags,
AccessFlags, BufferMemoryBarrier, DependencyFlags, DependencyInfo, ImageMemoryBarrier,
MemoryBarrier, PipelineMemoryAccess, PipelineStage, PipelineStages,
QueueFamilyOwnershipTransfer,
},
};
pub(crate) use self::{fence::FenceState, semaphore::SemaphoreState};
use crate::device::Queue;
use std::sync::Arc;
mod event;
mod fence;
mod future;
pub mod event;
pub mod fence;
pub mod future;
mod pipeline;
mod semaphore;
pub mod semaphore;
/// Declares in which queue(s) a resource can be used.
///

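Summing up the layout declared here: `event`, `fence`, `future` and `semaphore` become public submodules, while `pipeline` stays private with its contents re-exported at the root. Downstream imports against this layout look like:

use vulkano::sync::{
    event::Event,
    fence::{Fence, FenceCreateInfo},
    future::{now, FlushError, GpuFuture},
    semaphore::{Semaphore, SemaphoreCreateInfo},
    // Pipeline-barrier items remain re-exported from the root of `sync`.
    AccessFlags, DependencyFlags, DependencyInfo, PipelineStages,
};
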
View File

@ -9,10 +9,10 @@
use crate::{
buffer::sys::Buffer,
device::QueueFlags,
device::{Device, QueueFlags},
image::{sys::Image, ImageAspects, ImageLayout, ImageSubresourceRange},
macros::{vulkan_bitflags, vulkan_bitflags_enum},
DeviceSize,
DeviceSize, RequirementNotMet, Version,
};
use smallvec::SmallVec;
use std::{ops::Range, sync::Arc};
@ -146,283 +146,10 @@ vulkan_bitflags_enum! {
self
}
/// Returns the access types that are supported with the given pipeline stages.
///
/// Corresponds to the table
/// "[Supported access types](https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-access-types-supported)"
/// in the Vulkan specification.
#[inline]
pub fn supported_access(mut self) -> AccessFlags {
if self.is_empty() {
return AccessFlags::empty();
}
self = self.normalize();
let mut result = AccessFlags::MEMORY_READ | AccessFlags::MEMORY_WRITE;
if self.intersects(PipelineStages::DRAW_INDIRECT) {
result |=
AccessFlags::INDIRECT_COMMAND_READ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
}
if self.intersects(PipelineStages::VERTEX_INPUT) {}
if self.intersects(PipelineStages::VERTEX_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::TESSELLATION_CONTROL_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::TESSELLATION_EVALUATION_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::GEOMETRY_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::FRAGMENT_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ
| AccessFlags::INPUT_ATTACHMENT_READ;
}
if self.intersects(PipelineStages::EARLY_FRAGMENT_TESTS) {
result |= AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
| AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
}
if self.intersects(PipelineStages::LATE_FRAGMENT_TESTS) {
result |= AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
| AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
}
if self.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) {
result |= AccessFlags::COLOR_ATTACHMENT_READ
| AccessFlags::COLOR_ATTACHMENT_WRITE
| AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT;
}
if self.intersects(PipelineStages::COMPUTE_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::ALL_TRANSFER) {}
if self.intersects(PipelineStages::BOTTOM_OF_PIPE) {}
if self.intersects(PipelineStages::HOST) {
result |= AccessFlags::HOST_READ | AccessFlags::HOST_WRITE;
}
if self.intersects(PipelineStages::ALL_GRAPHICS) {}
if self.intersects(PipelineStages::ALL_COMMANDS) {}
if self.intersects(PipelineStages::COPY) {
result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
}
if self.intersects(PipelineStages::RESOLVE) {
result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
}
if self.intersects(PipelineStages::BLIT) {
result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
}
if self.intersects(PipelineStages::CLEAR) {
result |= AccessFlags::TRANSFER_WRITE;
}
if self.intersects(PipelineStages::INDEX_INPUT) {
result |= AccessFlags::INDEX_READ;
}
if self.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) {
result |= AccessFlags::VERTEX_ATTRIBUTE_READ;
}
if self.intersects(PipelineStages::PRE_RASTERIZATION_SHADERS) {}
if self.intersects(PipelineStages::VIDEO_DECODE) {
result |= AccessFlags::VIDEO_DECODE_READ | AccessFlags::VIDEO_DECODE_WRITE;
}
if self.intersects(PipelineStages::VIDEO_ENCODE) {
result |= AccessFlags::VIDEO_ENCODE_READ | AccessFlags::VIDEO_ENCODE_WRITE;
}
if self.intersects(PipelineStages::TRANSFORM_FEEDBACK) {
result |= AccessFlags::TRANSFORM_FEEDBACK_WRITE
| AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE
| AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
}
if self.intersects(PipelineStages::CONDITIONAL_RENDERING) {
result |= AccessFlags::CONDITIONAL_RENDERING_READ;
}
if self.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) {
result |= AccessFlags::INDIRECT_COMMAND_READ
| AccessFlags::SHADER_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::TRANSFER_READ
| AccessFlags::TRANSFER_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ
| AccessFlags::ACCELERATION_STRUCTURE_WRITE;
}
if self.intersects(PipelineStages::RAY_TRACING_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) {
result |= AccessFlags::FRAGMENT_DENSITY_MAP_READ;
}
if self.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) {
result |= AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ;
}
if self.intersects(PipelineStages::COMMAND_PREPROCESS) {
result |= AccessFlags::COMMAND_PREPROCESS_READ | AccessFlags::COMMAND_PREPROCESS_WRITE;
}
if self.intersects(PipelineStages::TASK_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::MESH_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if self.intersects(PipelineStages::SUBPASS_SHADING) {
result |= AccessFlags::INPUT_ATTACHMENT_READ;
}
if self.intersects(PipelineStages::INVOCATION_MASK) {
result |= AccessFlags::INVOCATION_MASK_READ;
}
result
}
},
/// A single stage in the device's processing pipeline.
PipelineStage impl {
#[inline]
pub fn required_queue_flags(self) -> QueueFlags {
// https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-supported
match self {
Self::TopOfPipe => QueueFlags::empty(),
Self::DrawIndirect => QueueFlags::GRAPHICS | QueueFlags::COMPUTE,
Self::VertexInput => QueueFlags::GRAPHICS,
Self::VertexShader => QueueFlags::GRAPHICS,
Self::TessellationControlShader => QueueFlags::GRAPHICS,
Self::TessellationEvaluationShader => QueueFlags::GRAPHICS,
Self::GeometryShader => QueueFlags::GRAPHICS,
Self::FragmentShader => QueueFlags::GRAPHICS,
Self::EarlyFragmentTests => QueueFlags::GRAPHICS,
Self::LateFragmentTests => QueueFlags::GRAPHICS,
Self::ColorAttachmentOutput => QueueFlags::GRAPHICS,
Self::ComputeShader => QueueFlags::COMPUTE,
Self::AllTransfer => QueueFlags::GRAPHICS | QueueFlags::COMPUTE | QueueFlags::TRANSFER,
Self::BottomOfPipe => QueueFlags::empty(),
Self::Host => QueueFlags::empty(),
Self::AllGraphics => QueueFlags::GRAPHICS,
Self::AllCommands => QueueFlags::empty(),
Self::Copy => todo!(
"The spec doesn't currently say which queue flags support this pipeline stage"
),
Self::Resolve => todo!(
"The spec doesn't currently say which queue flags support this pipeline stage"
),
Self::Blit => todo!(
"The spec doesn't currently say which queue flags support this pipeline stage"
),
Self::Clear => todo!(
"The spec doesn't currently say which queue flags support this pipeline stage"
),
Self::IndexInput => QueueFlags::GRAPHICS,
Self::VertexAttributeInput => QueueFlags::GRAPHICS,
Self::PreRasterizationShaders => QueueFlags::GRAPHICS,
Self::VideoDecode => QueueFlags::VIDEO_DECODE,
Self::VideoEncode => QueueFlags::VIDEO_ENCODE,
Self::ConditionalRendering => QueueFlags::GRAPHICS | QueueFlags::COMPUTE,
Self::TransformFeedback => QueueFlags::GRAPHICS,
Self::CommandPreprocess => QueueFlags::GRAPHICS | QueueFlags::COMPUTE,
Self::FragmentShadingRateAttachment => QueueFlags::GRAPHICS,
Self::TaskShader => QueueFlags::GRAPHICS,
Self::MeshShader => QueueFlags::GRAPHICS,
Self::AccelerationStructureBuild => QueueFlags::COMPUTE,
Self::RayTracingShader => QueueFlags::COMPUTE,
Self::FragmentDensityProcess => QueueFlags::GRAPHICS,
Self::SubpassShading => QueueFlags::GRAPHICS,
Self::InvocationMask => todo!(
"The spec doesn't currently say which queue flags support this pipeline stage"
),
}
}
},
PipelineStage,
= PipelineStageFlags2(u64);
@ -659,6 +386,78 @@ vulkan_bitflags_enum! {
*/
}
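// Editorial usage sketch, not part of this diff: `queue_family_properties` is an
// assumed variable holding the properties of some queue family. The per-stage
// queue flags can gate stage-specific commands before recording them.
let stage = PipelineStage::ComputeShader;
if queue_family_properties
    .queue_flags
    .contains(stage.required_queue_flags())
{
    // `stage` is usable on queues from this family.
}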
impl From<QueueFlags> for PipelineStages {
/// Corresponds to the table "[Supported pipeline stage flags]" in the Vulkan specification.
///
/// [Supported pipeline stage flags]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-pipeline-stages-supported
#[inline]
fn from(val: QueueFlags) -> Self {
let mut result = PipelineStages::TOP_OF_PIPE
| PipelineStages::BOTTOM_OF_PIPE
| PipelineStages::HOST
| PipelineStages::ALL_COMMANDS;
if val.intersects(QueueFlags::GRAPHICS | QueueFlags::COMPUTE | QueueFlags::TRANSFER) {
result |= PipelineStages::ALL_TRANSFER
| PipelineStages::COPY
| PipelineStages::RESOLVE
| PipelineStages::BLIT
| PipelineStages::CLEAR;
//| PipelineStages::ACCELERATION_STRUCTURE_COPY;
}
if val.intersects(QueueFlags::GRAPHICS) {
result |= PipelineStages::DRAW_INDIRECT
| PipelineStages::VERTEX_INPUT
| PipelineStages::VERTEX_SHADER
| PipelineStages::TESSELLATION_CONTROL_SHADER
| PipelineStages::TESSELLATION_EVALUATION_SHADER
| PipelineStages::GEOMETRY_SHADER
| PipelineStages::FRAGMENT_SHADER
| PipelineStages::EARLY_FRAGMENT_TESTS
| PipelineStages::LATE_FRAGMENT_TESTS
| PipelineStages::COLOR_ATTACHMENT_OUTPUT
| PipelineStages::ALL_GRAPHICS
| PipelineStages::INDEX_INPUT
| PipelineStages::VERTEX_ATTRIBUTE_INPUT
| PipelineStages::PRE_RASTERIZATION_SHADERS
| PipelineStages::CONDITIONAL_RENDERING
| PipelineStages::TRANSFORM_FEEDBACK
| PipelineStages::COMMAND_PREPROCESS
| PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT
| PipelineStages::TASK_SHADER
| PipelineStages::MESH_SHADER
| PipelineStages::FRAGMENT_DENSITY_PROCESS
| PipelineStages::SUBPASS_SHADING
| PipelineStages::INVOCATION_MASK;
}
if val.intersects(QueueFlags::COMPUTE) {
result |= PipelineStages::DRAW_INDIRECT
| PipelineStages::COMPUTE_SHADER
| PipelineStages::CONDITIONAL_RENDERING
| PipelineStages::COMMAND_PREPROCESS
| PipelineStages::ACCELERATION_STRUCTURE_BUILD
| PipelineStages::RAY_TRACING_SHADER;
//| PipelineStages::MICROMAP_BUILD;
}
if val.intersects(QueueFlags::VIDEO_DECODE) {
result |= PipelineStages::VIDEO_DECODE;
}
if val.intersects(QueueFlags::VIDEO_ENCODE) {
result |= PipelineStages::VIDEO_ENCODE;
}
/*if val.intersects(QueueFlags::OPTICAL_FLOW) {
result |= PipelineStages::OPTICAL_FLOW;
}*/
result
}
}
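// A minimal sketch of this conversion in use (assumed variable
// `queue_family_properties`): expand a queue family's flags into the pipeline
// stages it supports, then test a single stage with `contains_enum`.
let supported = PipelineStages::from(queue_family_properties.queue_flags);
let stage_ok = supported.contains_enum(PipelineStage::ComputeShader);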
impl From<PipelineStage> for ash::vk::PipelineStageFlags {
#[inline]
fn from(val: PipelineStage) -> Self {
@ -937,6 +736,213 @@ vulkan_bitflags! {
*/
}
impl From<PipelineStages> for AccessFlags {
/// Corresponds to the table "[Supported access types]" in the Vulkan specification.
///
/// [Supported access types]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/chap7.html#synchronization-access-types-supported
#[inline]
fn from(mut val: PipelineStages) -> Self {
if val.is_empty() {
return AccessFlags::empty();
}
val = val.normalize();
let mut result = AccessFlags::MEMORY_READ | AccessFlags::MEMORY_WRITE;
if val.intersects(PipelineStages::DRAW_INDIRECT) {
result |=
AccessFlags::INDIRECT_COMMAND_READ | AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
}
if val.intersects(PipelineStages::VERTEX_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::TESSELLATION_CONTROL_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::TESSELLATION_EVALUATION_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::GEOMETRY_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::FRAGMENT_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ
| AccessFlags::INPUT_ATTACHMENT_READ;
}
if val.intersects(PipelineStages::EARLY_FRAGMENT_TESTS) {
result |= AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
| AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
}
if val.intersects(PipelineStages::LATE_FRAGMENT_TESTS) {
result |= AccessFlags::DEPTH_STENCIL_ATTACHMENT_READ
| AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE;
}
if val.intersects(PipelineStages::COLOR_ATTACHMENT_OUTPUT) {
result |= AccessFlags::COLOR_ATTACHMENT_READ
| AccessFlags::COLOR_ATTACHMENT_WRITE
| AccessFlags::COLOR_ATTACHMENT_READ_NONCOHERENT;
}
if val.intersects(PipelineStages::COMPUTE_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::HOST) {
result |= AccessFlags::HOST_READ | AccessFlags::HOST_WRITE;
}
if val.intersects(PipelineStages::COPY) {
result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
}
if val.intersects(PipelineStages::RESOLVE) {
result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
}
if val.intersects(PipelineStages::BLIT) {
result |= AccessFlags::TRANSFER_READ | AccessFlags::TRANSFER_WRITE;
}
if val.intersects(PipelineStages::CLEAR) {
result |= AccessFlags::TRANSFER_WRITE;
}
if val.intersects(PipelineStages::INDEX_INPUT) {
result |= AccessFlags::INDEX_READ;
}
if val.intersects(PipelineStages::VERTEX_ATTRIBUTE_INPUT) {
result |= AccessFlags::VERTEX_ATTRIBUTE_READ;
}
if val.intersects(PipelineStages::VIDEO_DECODE) {
result |= AccessFlags::VIDEO_DECODE_READ | AccessFlags::VIDEO_DECODE_WRITE;
}
if val.intersects(PipelineStages::VIDEO_ENCODE) {
result |= AccessFlags::VIDEO_ENCODE_READ | AccessFlags::VIDEO_ENCODE_WRITE;
}
if val.intersects(PipelineStages::TRANSFORM_FEEDBACK) {
result |= AccessFlags::TRANSFORM_FEEDBACK_WRITE
| AccessFlags::TRANSFORM_FEEDBACK_COUNTER_WRITE
| AccessFlags::TRANSFORM_FEEDBACK_COUNTER_READ;
}
if val.intersects(PipelineStages::CONDITIONAL_RENDERING) {
result |= AccessFlags::CONDITIONAL_RENDERING_READ;
}
if val.intersects(PipelineStages::ACCELERATION_STRUCTURE_BUILD) {
result |= AccessFlags::INDIRECT_COMMAND_READ
| AccessFlags::SHADER_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::TRANSFER_READ
| AccessFlags::TRANSFER_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ
| AccessFlags::ACCELERATION_STRUCTURE_WRITE;
}
if val.intersects(PipelineStages::RAY_TRACING_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::FRAGMENT_DENSITY_PROCESS) {
result |= AccessFlags::FRAGMENT_DENSITY_MAP_READ;
}
if val.intersects(PipelineStages::FRAGMENT_SHADING_RATE_ATTACHMENT) {
result |= AccessFlags::FRAGMENT_SHADING_RATE_ATTACHMENT_READ;
}
if val.intersects(PipelineStages::COMMAND_PREPROCESS) {
result |= AccessFlags::COMMAND_PREPROCESS_READ | AccessFlags::COMMAND_PREPROCESS_WRITE;
}
if val.intersects(PipelineStages::TASK_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::MESH_SHADER) {
result |= AccessFlags::SHADER_READ
| AccessFlags::UNIFORM_READ
| AccessFlags::SHADER_SAMPLED_READ
| AccessFlags::SHADER_STORAGE_READ
| AccessFlags::SHADER_WRITE
| AccessFlags::SHADER_STORAGE_WRITE
| AccessFlags::ACCELERATION_STRUCTURE_READ;
}
if val.intersects(PipelineStages::SUBPASS_SHADING) {
result |= AccessFlags::INPUT_ATTACHMENT_READ;
}
if val.intersects(PipelineStages::INVOCATION_MASK) {
result |= AccessFlags::INVOCATION_MASK_READ;
}
result
}
}
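// Sketch of how validation code might use this conversion; the helper name is
// hypothetical. It returns true when every access type in `access` is supported
// by at least one of the given stages.
fn accesses_supported(stages: PipelineStages, access: AccessFlags) -> bool {
    AccessFlags::from(stages).contains(access)
}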
impl From<AccessFlags> for ash::vk::AccessFlags {
#[inline]
fn from(val: AccessFlags) -> Self {
@ -955,12 +961,13 @@ pub struct PipelineMemoryAccess {
pub exclusive: bool,
}
/// Dependency info for a pipeline barrier.
/// Dependency info for barriers in a pipeline barrier or event command.
///
/// A pipeline barrier creates a dependency between commands submitted before the barrier (the
/// source scope) and commands submitted after it (the destination scope). A pipeline barrier
/// consists of multiple individual barriers that either concern a single resource or
/// operate globally.
/// source scope) and commands submitted after it (the destination scope). An event command acts
/// like a split pipeline barrier: the source scope and destination scope are defined
/// relative to different commands. Each `DependencyInfo` consists of multiple individual
/// barriers that either concern a single resource or operate globally.
///
/// Each barrier has a set of source/destination pipeline stages and source/destination memory
/// access types. The pipeline stages create an *execution dependency*: the `src_stages` of
@ -971,13 +978,24 @@ pub struct PipelineMemoryAccess {
/// are made after the barrier.
#[derive(Clone, Debug)]
pub struct DependencyInfo {
/// Flags to modify how the execution and memory dependencies are formed.
///
/// The default value is empty.
pub dependency_flags: DependencyFlags,
/// Memory barriers for global operations and accesses, not limited to a single resource.
///
/// The default value is empty.
pub memory_barriers: SmallVec<[MemoryBarrier; 2]>,
/// Memory barriers for individual buffers.
///
/// The default value is empty.
pub buffer_memory_barriers: SmallVec<[BufferMemoryBarrier; 8]>,
/// Memory barriers for individual images.
///
/// The default value is empty.
pub image_memory_barriers: SmallVec<[ImageMemoryBarrier; 8]>,
pub _ne: crate::NonExhaustive,
@ -1005,6 +1023,7 @@ impl Default for DependencyInfo {
#[inline]
fn default() -> Self {
Self {
dependency_flags: DependencyFlags::empty(),
memory_barriers: SmallVec::new(),
buffer_memory_barriers: SmallVec::new(),
image_memory_barriers: SmallVec::new(),
@ -1013,6 +1032,50 @@ impl Default for DependencyInfo {
}
}
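// For illustration: a `DependencyInfo` with one global barrier, built with
// struct-update syntax. This sketch assumes `MemoryBarrier` implements `Default`
// in the same way `DependencyInfo` does.
let dependency_info = DependencyInfo {
    memory_barriers: [MemoryBarrier {
        // Make transfer writes available to later fragment-shader reads.
        src_stages: PipelineStages::COPY,
        src_access: AccessFlags::TRANSFER_WRITE,
        dst_stages: PipelineStages::FRAGMENT_SHADER,
        dst_access: AccessFlags::SHADER_READ,
        ..MemoryBarrier::default()
    }]
    .into_iter()
    .collect(),
    ..DependencyInfo::default()
};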
vulkan_bitflags! {
#[non_exhaustive]
/// Flags that modify how execution and memory dependencies are formed.
DependencyFlags = DependencyFlags(u32);
/// For framebuffer-space pipeline stages, specifies that the dependency is framebuffer-local.
/// The implementation can start the destination operation for some given pixels as long as the
/// source operation is finished for these given pixels.
///
/// Framebuffer-local dependencies are usually more efficient, especially on tile-based
/// architectures.
BY_REGION = BY_REGION,
/// For devices that consist of multiple physical devices, specifies that the dependency is
/// device-local. The dependency will only apply to the operations on each physical device
/// individually, rather than applying to all physical devices as a whole. This allows each
/// physical device to operate independently of the others.
///
/// The device API version must be at least 1.1, or the [`khr_device_group`] extension must be
/// enabled on the device.
///
/// [`khr_device_group`]: crate::device::DeviceExtensions::khr_device_group
DEVICE_GROUP = DEVICE_GROUP {
api_version: V1_1,
device_extensions: [khr_device_group],
},
/// For subpass dependencies, and pipeline barriers executing within a render pass instance,
/// if the render pass uses multiview rendering, specifies that the dependency is view-local.
/// Each view in the destination subpass will only depend on a single view in the source
/// subpass, instead of all views.
///
/// The device API version must be at least 1.1, or the [`khr_multiview`] extension must be
/// enabled on the device.
///
/// [`khr_multiview`]: crate::device::DeviceExtensions::khr_multiview
VIEW_LOCAL = VIEW_LOCAL {
api_version: V1_1,
device_extensions: [khr_multiview],
},
}
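// Brief sketch: the flags slot directly into `DependencyInfo`. A
// framebuffer-local dependency is usually cheaper, especially on tiling GPUs.
let info = DependencyInfo {
    dependency_flags: DependencyFlags::BY_REGION,
    ..DependencyInfo::default()
};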
/// A memory barrier that is applied globally.
#[derive(Clone, Debug)]
pub struct MemoryBarrier {
@ -1075,7 +1138,7 @@ pub struct BufferMemoryBarrier {
/// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers
/// ownership of a resource from one queue family to another.
pub queue_family_transfer: Option<QueueFamilyTransfer>,
pub queue_family_ownership_transfer: Option<QueueFamilyOwnershipTransfer>,
/// The buffer to apply the barrier to.
pub buffer: Arc<Buffer>,
@ -1094,7 +1157,7 @@ impl BufferMemoryBarrier {
src_access: AccessFlags::empty(),
dst_stages: PipelineStages::empty(),
dst_access: AccessFlags::empty(),
queue_family_transfer: None,
queue_family_ownership_transfer: None,
buffer,
range: 0..0,
_ne: crate::NonExhaustive(()),
@ -1134,7 +1197,7 @@ pub struct ImageMemoryBarrier {
/// For resources created with [`Sharing::Exclusive`](crate::sync::Sharing), transfers
/// ownership of a resource from one queue family to another.
pub queue_family_transfer: Option<QueueFamilyTransfer>,
pub queue_family_ownership_transfer: Option<QueueFamilyOwnershipTransfer>,
/// The image to apply the barrier to.
pub image: Arc<Image>,
@ -1155,7 +1218,7 @@ impl ImageMemoryBarrier {
dst_access: AccessFlags::empty(),
old_layout: ImageLayout::Undefined,
new_layout: ImageLayout::Undefined,
queue_family_transfer: None,
queue_family_ownership_transfer: None,
image,
subresource_range: ImageSubresourceRange {
aspects: ImageAspects::empty(), // Can't use image format aspects because `color` can't be specified with `planeN`.
@ -1168,11 +1231,265 @@ impl ImageMemoryBarrier {
}
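// Editorial sketch of a layout transition, assuming the constructor shown above
// is named `ImageMemoryBarrier::image` and `image` is an `Arc<Image>`.
let barrier = ImageMemoryBarrier {
    dst_stages: PipelineStages::COPY,
    dst_access: AccessFlags::TRANSFER_WRITE,
    // Transition a freshly created color image so a copy command can write to it.
    old_layout: ImageLayout::Undefined,
    new_layout: ImageLayout::TransferDstOptimal,
    subresource_range: ImageSubresourceRange {
        aspects: ImageAspects::COLOR,
        mip_levels: 0..1,
        array_layers: 0..1,
    },
    ..ImageMemoryBarrier::image(image)
};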
/// Specifies a queue family ownership transfer for a resource.
///
/// There are three classes of queues that can be used in an ownership transfer:
/// - A **local** queue exists on the current [`Instance`] and [`Device`].
/// - An **external** queue does not exist on the current [`Instance`], but has the same
/// [`device_uuid`] and [`driver_uuid`] as the current [`Device`].
/// - A **foreign** queue can be an external queue, or any queue on another device for which the
/// mentioned parameters do not match.
///
/// [`Instance`]: crate::instance::Instance
/// [`Device`]: crate::device::Device
/// [`device_uuid`]: crate::device::Properties::device_uuid
/// [`driver_uuid`]: crate::device::Properties::driver_uuid
#[derive(Clone, Copy, Debug)]
pub struct QueueFamilyTransfer {
/// The queue family that currently owns the resource.
pub source_index: u32,
pub enum QueueFamilyOwnershipTransfer {
/// For a resource with [`Sharing::Exclusive`], transfers ownership between two local queues.
///
/// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
ExclusiveBetweenLocal {
/// The queue family that currently owns the resource.
src_index: u32,
/// The queue family to transfer ownership to.
pub destination_index: u32,
/// The queue family to transfer ownership to.
dst_index: u32,
},
/// For a resource with [`Sharing::Exclusive`], transfers ownership from a local queue to an
/// external queue.
///
/// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
/// be enabled on the device.
///
/// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
/// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
ExclusiveToExternal {
/// The queue family that currently owns the resource.
src_index: u32,
},
/// For a resource with [`Sharing::Exclusive`], transfers ownership from an external queue to a
/// local queue.
///
/// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
/// be enabled on the device.
///
/// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
/// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
ExclusiveFromExternal {
/// The queue family to transfer ownership to.
dst_index: u32,
},
/// For a resource with [`Sharing::Exclusive`], transfers ownership from a local queue to a
/// foreign queue.
///
/// The [`ext_queue_family_foreign`] extension must be enabled on the device.
///
/// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
/// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
ExclusiveToForeign {
/// The queue family that currently owns the resource.
src_index: u32,
},
/// For a resource with [`Sharing::Exclusive`], transfers ownership from a foreign queue to a
/// local queue.
///
/// The [`ext_queue_family_foreign`] extension must be enabled on the device.
///
/// [`Sharing::Exclusive`]: crate::sync::Sharing::Exclusive
/// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
ExclusiveFromForeign {
/// The queue family to transfer ownership to.
dst_index: u32,
},
/// For a resource with [`Sharing::Concurrent`], transfers ownership from its local queues to
/// an external queue.
///
/// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
/// be enabled on the device.
///
/// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
/// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
ConcurrentToExternal,
/// For a resource with [`Sharing::Concurrent`], transfers ownership from an external queue to
/// its local queues.
///
/// The device API version must be at least 1.1, or the [`khr_external_memory`] extension must
/// be enabled on the device.
///
/// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
/// [`khr_external_memory`]: crate::device::DeviceExtensions::khr_external_memory
ConcurrentFromExternal,
/// For a resource with [`Sharing::Concurrent`], transfers ownership from its local queues to
/// a foreign queue.
///
/// The [`ext_queue_family_foreign`] extension must be enabled on the device.
///
/// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
/// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
ConcurrentToForeign,
/// For a resource with [`Sharing::Concurrent`], transfers ownership from a foreign queue to
/// its local queues.
///
/// The [`ext_queue_family_foreign`] extension must be enabled on the device.
///
/// [`Sharing::Concurrent`]: crate::sync::Sharing::Concurrent
/// [`ext_queue_family_foreign`]: crate::device::DeviceExtensions::ext_queue_family_foreign
ConcurrentFromForeign,
}
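// Sketch of the release half of an exclusive ownership transfer to an external
// queue. Assumptions: the `BufferMemoryBarrier::buffer` constructor above, an
// `Arc<Buffer>` named `buffer` whose raw size is exposed via `size()`, and
// source queue family index 0.
let release = BufferMemoryBarrier {
    src_stages: PipelineStages::ALL_COMMANDS,
    src_access: AccessFlags::MEMORY_WRITE,
    queue_family_ownership_transfer: Some(
        QueueFamilyOwnershipTransfer::ExclusiveToExternal { src_index: 0 },
    ),
    range: 0..buffer.size(),
    ..BufferMemoryBarrier::buffer(buffer.clone())
};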
impl QueueFamilyOwnershipTransfer {
pub(crate) fn validate_device(self, device: &Device) -> Result<(), RequirementNotMet> {
match self {
QueueFamilyOwnershipTransfer::ExclusiveToExternal { .. } => {
if !(device.api_version() >= Version::V1_1
|| device.enabled_extensions().khr_external_memory)
{
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ExclusiveToExternal",
requires_one_of: crate::RequiresOneOf {
api_version: Some(Version::V1_1),
device_extensions: &["khr_external_memory"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ExclusiveFromExternal { .. } => {
if !(device.api_version() >= Version::V1_1
|| device.enabled_extensions().khr_external_memory)
{
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ExclusiveFromExternal",
requires_one_of: crate::RequiresOneOf {
api_version: Some(Version::V1_1),
device_extensions: &["khr_external_memory"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ExclusiveToForeign { .. } => {
if !device.enabled_extensions().ext_queue_family_foreign {
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ExclusiveToForeign",
requires_one_of: crate::RequiresOneOf {
device_extensions: &["ext_queue_family_foreign"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ExclusiveFromForeign { .. } => {
if !device.enabled_extensions().ext_queue_family_foreign {
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ExclusiveFromForeign",
requires_one_of: crate::RequiresOneOf {
device_extensions: &["ext_queue_family_foreign"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ConcurrentToExternal => {
if !(device.api_version() >= Version::V1_1
|| device.enabled_extensions().khr_external_memory)
{
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ConcurrentToExternal",
requires_one_of: crate::RequiresOneOf {
api_version: Some(Version::V1_1),
device_extensions: &["khr_external_memory"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ConcurrentFromExternal => {
if !(device.api_version() >= Version::V1_1
|| device.enabled_extensions().khr_external_memory)
{
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ConcurrentFromExternal",
requires_one_of: crate::RequiresOneOf {
api_version: Some(Version::V1_1),
device_extensions: &["khr_external_memory"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ConcurrentToForeign => {
if !device.enabled_extensions().ext_queue_family_foreign {
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ConcurrentToForeign",
requires_one_of: crate::RequiresOneOf {
device_extensions: &["ext_queue_family_foreign"],
..Default::default()
},
});
}
}
QueueFamilyOwnershipTransfer::ConcurrentFromForeign => {
if !device.enabled_extensions().ext_queue_family_foreign {
return Err(crate::RequirementNotMet {
required_for: "`QueueFamilyOwnershipTransfer::ConcurrentFromForeign",
requires_one_of: crate::RequiresOneOf {
device_extensions: &["ext_queue_family_foreign"],
..Default::default()
},
});
}
}
_ => (),
}
Ok(())
}
}
impl From<QueueFamilyOwnershipTransfer> for (u32, u32) {
fn from(val: QueueFamilyOwnershipTransfer) -> Self {
match val {
QueueFamilyOwnershipTransfer::ExclusiveBetweenLocal {
src_index,
dst_index,
} => (src_index, dst_index),
QueueFamilyOwnershipTransfer::ExclusiveToExternal { src_index } => {
(src_index, ash::vk::QUEUE_FAMILY_EXTERNAL)
}
QueueFamilyOwnershipTransfer::ExclusiveFromExternal { dst_index } => {
(ash::vk::QUEUE_FAMILY_EXTERNAL, dst_index)
}
QueueFamilyOwnershipTransfer::ExclusiveToForeign { src_index } => {
(src_index, ash::vk::QUEUE_FAMILY_FOREIGN_EXT)
}
QueueFamilyOwnershipTransfer::ExclusiveFromForeign { dst_index } => {
(ash::vk::QUEUE_FAMILY_FOREIGN_EXT, dst_index)
}
QueueFamilyOwnershipTransfer::ConcurrentToExternal => (
ash::vk::QUEUE_FAMILY_IGNORED,
ash::vk::QUEUE_FAMILY_EXTERNAL,
),
QueueFamilyOwnershipTransfer::ConcurrentFromExternal => (
ash::vk::QUEUE_FAMILY_EXTERNAL,
ash::vk::QUEUE_FAMILY_IGNORED,
),
QueueFamilyOwnershipTransfer::ConcurrentToForeign => (
ash::vk::QUEUE_FAMILY_IGNORED,
ash::vk::QUEUE_FAMILY_FOREIGN_EXT,
),
QueueFamilyOwnershipTransfer::ConcurrentFromForeign => (
ash::vk::QUEUE_FAMILY_FOREIGN_EXT,
ash::vk::QUEUE_FAMILY_IGNORED,
),
}
}
}
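// For instance, the concurrent-to-external case maps to the ash sentinel indices
// used by the match arms above:
let (src, dst): (u32, u32) = QueueFamilyOwnershipTransfer::ConcurrentToExternal.into();
assert_eq!(src, ash::vk::QUEUE_FAMILY_IGNORED);
assert_eq!(dst, ash::vk::QUEUE_FAMILY_EXTERNAL);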

View File

@ -7,6 +7,9 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
//! A semaphore provides synchronization between multiple queues, between a queue and
//! non-command-buffer commands on the same queue, or between the device and an external source.
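//!
//! A minimal creation sketch (editorial; assumes an existing `device: Arc<Device>`):
//!
//! let semaphore = Semaphore::new(device.clone(), SemaphoreCreateInfo::default())?;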
use crate::{
device::{Device, DeviceOwned, Queue},
macros::{vulkan_bitflags, vulkan_bitflags_enum},
@ -1572,10 +1575,12 @@ mod tests {
use crate::{
device::{Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo},
instance::{Instance, InstanceCreateInfo, InstanceExtensions},
sync::{ExternalSemaphoreHandleType, ExternalSemaphoreHandleTypes, SemaphoreCreateInfo},
sync::semaphore::{
ExternalSemaphoreHandleType, ExternalSemaphoreHandleTypes, SemaphoreCreateInfo,
},
VulkanLibrary,
};
use crate::{sync::Semaphore, VulkanObject};
use crate::{sync::semaphore::Semaphore, VulkanObject};
#[test]
fn semaphore_create() {