From e6e4bc6a260782bfcc3e3be4d26f278880578909 Mon Sep 17 00:00:00 2001 From: marc0246 <40955683+marc0246@users.noreply.github.com> Date: Sun, 21 Jul 2024 13:14:55 +0200 Subject: [PATCH] Task graph [2/10]: the task graph data structure (#2545) --- Cargo.lock | 3 +- vulkano-taskgraph/src/graph/mod.rs | 1257 ++++++++++++++++++++++++++++ vulkano-taskgraph/src/lib.rs | 78 +- vulkano-taskgraph/src/resource.rs | 128 ++- vulkano/src/sync/mod.rs | 17 + 5 files changed, 1398 insertions(+), 85 deletions(-) create mode 100644 vulkano-taskgraph/src/graph/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 70638603..a1aa6c3b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2339,7 +2339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00bbb9a832cd697a36c2abd5ef58c263b0bc33cdf280f704b895646ed3e9f595" dependencies = [ "libc", - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -2363,6 +2363,7 @@ dependencies = [ "half", "heck", "indexmap", + "libc", "libloading 0.8.3", "nom", "objc", diff --git a/vulkano-taskgraph/src/graph/mod.rs b/vulkano-taskgraph/src/graph/mod.rs new file mode 100644 index 00000000..22b3ad87 --- /dev/null +++ b/vulkano-taskgraph/src/graph/mod.rs @@ -0,0 +1,1257 @@ +//! The task graph data structure and associated types. 
+ +use crate::{ + resource::{AccessType, BufferRange, ImageLayoutType}, + Id, InvalidSlotError, QueueFamilyType, Task, BUFFER_TAG, IMAGE_TAG, SWAPCHAIN_TAG, +}; +use concurrent_slotmap::{IterMut, IterUnprotected, SlotId, SlotMap}; +use smallvec::SmallVec; +use std::{borrow::Cow, error::Error, fmt, hint, iter::FusedIterator, ops::Range, sync::Arc}; +use vulkano::{ + buffer::{Buffer, BufferCreateInfo}, + device::{Device, DeviceOwned, Queue}, + format::Format, + image::{ + Image, ImageAspects, ImageCreateFlags, ImageCreateInfo, ImageLayout, ImageSubresourceRange, + }, + swapchain::{Swapchain, SwapchainCreateInfo}, + sync::{semaphore::Semaphore, AccessFlags, PipelineStages}, + DeviceSize, +}; + +const EXCLUSIVE_BIT: u32 = 1 << 6; +const VIRTUAL_BIT: u32 = 1 << 7; + +/// The task graph is a [directed acyclic graph] consisting of [`Task`] nodes, with edges +/// representing happens-before relations. +/// +/// [directed acyclic graph]: https://en.wikipedia.org/wiki/Directed_acyclic_graph +pub struct TaskGraph { + nodes: Nodes, + resources: Resources, +} + +struct Nodes { + inner: SlotMap>, +} + +struct Node { + name: Cow<'static, str>, + inner: NodeInner, + in_edges: Vec, + out_edges: Vec, +} + +enum NodeInner { + Task(TaskNode), + // TODO: + Semaphore, +} + +type NodeIndex = u32; + +struct Resources { + inner: SlotMap, +} + +#[derive(Clone, Copy)] +enum ResourceInfo { + Buffer(BufferInfo), + Image(ImageInfo), + Swapchain(SwapchainInfo), +} + +#[derive(Clone, Copy)] +struct BufferInfo { + size: DeviceSize, +} + +#[derive(Clone, Copy)] +struct ImageInfo { + flags: ImageCreateFlags, + format: Format, + array_layers: u32, + mip_levels: u32, +} + +#[derive(Clone, Copy)] +struct SwapchainInfo { + image_array_layers: u32, +} + +impl TaskGraph { + /// Creates a new `TaskGraph`. + /// + /// `max_nodes` is the maximum number of nodes the graph can ever have. `max_resources` is the + /// maximum number of resources the graph can ever have. 
+ #[must_use] + pub fn new(max_nodes: u32, max_resources: u32) -> Self { + TaskGraph { + nodes: Nodes { + inner: SlotMap::new(max_nodes), + }, + resources: Resources { + inner: SlotMap::new(max_resources), + }, + } + } + + /// Creates a new [`TaskNode`] from the given `task` and adds it to the graph. Returns a + /// builder allowing you to add resource accesses to the task node. + /// + /// `queue_family_type` is the type of queue family the task can be executed on. + #[must_use] + pub fn create_task_node( + &mut self, + name: impl Into>, + queue_family_type: QueueFamilyType, + task: impl Task, + ) -> TaskNodeBuilder<'_, W> { + let id = self.nodes.add_node( + name.into(), + NodeInner::Task(TaskNode::new(queue_family_type, task)), + ); + + // SAFETY: We just inserted this task node. + let task_node = unsafe { self.nodes.task_node_unchecked_mut(id.index()) }; + + TaskNodeBuilder { + id, + task_node, + resources: &self.resources, + } + } + + /// Removes the task node corresponding to `id` from the graph. + pub fn remove_task_node(&mut self, id: NodeId) -> Result> { + self.task_node(id)?; + + let task = match self.nodes.remove_node(id).inner { + NodeInner::Task(task) => task, + // We checked that the node is a task above. + _ => unreachable!(), + }; + + Ok(task) + } + + /// Adds an edge starting at the node corresponding to `from` and ending at the node + /// corresponding to `to`. + pub fn add_edge(&mut self, from: NodeId, to: NodeId) -> Result { + let [from_node, to_node] = self.nodes.node_many_mut([from, to])?; + let out_edges = &mut from_node.out_edges; + let in_edges = &mut to_node.in_edges; + + if !out_edges.contains(&to.index()) { + out_edges.push(to.index()); + in_edges.push(from.index()); + + Ok(()) + } else { + Err(TaskGraphError::DuplicateEdge) + } + } + + /// Removes an edge starting at the node corresponding to `from` and ending at the node + /// corresponding to `to`. 
+ pub fn remove_edge(&mut self, from: NodeId, to: NodeId) -> Result { + let [from_node, to_node] = self.nodes.node_many_mut([from, to])?; + let out_edges = &mut from_node.out_edges; + let in_edges = &mut to_node.in_edges; + + if let Some(index) = out_edges.iter().position(|&i| i == to.index()) { + out_edges.remove(index); + let edge_index = in_edges.iter().position(|&i| i == from.index()).unwrap(); + in_edges.remove(edge_index); + + Ok(()) + } else { + Err(TaskGraphError::InvalidEdge) + } + } + + /// Returns a reference to the task node corresponding to `id`. + #[inline] + pub fn task_node(&self, id: NodeId) -> Result<&TaskNode> { + self.nodes.task_node(id) + } + + /// Returns a mutable reference to the task node corresponding to `id`. + #[inline] + pub fn task_node_mut(&mut self, id: NodeId) -> Result<&mut TaskNode> { + self.nodes.task_node_mut(id) + } + + /// Returns an iterator over all [`TaskNode`]s. + #[inline] + pub fn task_nodes(&self) -> TaskNodes<'_, W> { + TaskNodes { + inner: self.nodes.nodes(), + } + } + + /// Returns an iterator over all [`TaskNode`]s that allows you to mutate them. + #[inline] + pub fn task_nodes_mut(&mut self) -> TaskNodesMut<'_, W> { + TaskNodesMut { + inner: self.nodes.nodes_mut(), + } + } + + /// Add a [virtual buffer resource] to the task graph. + #[must_use] + pub fn add_buffer(&mut self, create_info: &BufferCreateInfo) -> Id { + self.resources.add_buffer(create_info) + } + + /// Add a [virtual image resource] to the task graph. + #[must_use] + pub fn add_image(&mut self, create_info: &ImageCreateInfo) -> Id { + self.resources.add_image(create_info) + } + + /// Add a [virtual swapchain resource] to the task graph. 
+ #[must_use] + pub fn add_swapchain(&mut self, create_info: &SwapchainCreateInfo) -> Id { + self.resources.add_swapchain(create_info) + } +} + +impl Nodes { + fn add_node(&mut self, name: Cow<'static, str>, inner: NodeInner) -> NodeId { + let slot = self.inner.insert_mut(Node { + name, + inner, + in_edges: Vec::new(), + out_edges: Vec::new(), + }); + + NodeId { slot } + } + + fn remove_node(&mut self, id: NodeId) -> Node { + let node = self.inner.remove_mut(id.slot).unwrap(); + + // NOTE(Marc): We must not leave any broken edges because the rest of the code relies on + // this being impossible. + + for &in_node_index in &node.in_edges { + // SAFETY: By our own invariant, node indices in the edges vectors must be valid. + let out_edges = &mut unsafe { self.node_unchecked_mut(in_node_index) }.out_edges; + let edge_index = out_edges.iter().position(|&i| i == id.index()).unwrap(); + out_edges.remove(edge_index); + } + + for &out_node_index in &node.out_edges { + // SAFETY: By our own invariant, node indices in the edges vectors must be valid. + let in_edges = &mut unsafe { self.node_unchecked_mut(out_node_index) }.in_edges; + let edge_index = in_edges.iter().position(|&i| i == id.index()).unwrap(); + in_edges.remove(edge_index); + } + + node + } + + fn capacity(&self) -> u32 { + self.inner.capacity() + } + + fn len(&self) -> u32 { + self.inner.len() + } + + fn task_node(&self, id: NodeId) -> Result<&TaskNode> { + let node = self.node(id)?; + + if let NodeInner::Task(task_node) = &node.inner { + Ok(task_node) + } else { + Err(TaskGraphError::InvalidNodeType) + } + } + + unsafe fn task_node_unchecked(&self, index: NodeIndex) -> &TaskNode { + // SAFETY: The caller must ensure that the `index` is valid. + let node = unsafe { self.node_unchecked(index) }; + + if let NodeInner::Task(task_node) = &node.inner { + task_node + } else { + // SAFETY: The caller must ensure that the node is a `TaskNode`. 
+ unsafe { hint::unreachable_unchecked() } + } + } + + fn task_node_mut(&mut self, id: NodeId) -> Result<&mut TaskNode> { + let node = self.node_mut(id)?; + + if let NodeInner::Task(task_node) = &mut node.inner { + Ok(task_node) + } else { + Err(TaskGraphError::InvalidNodeType) + } + } + + unsafe fn task_node_unchecked_mut(&mut self, index: NodeIndex) -> &mut TaskNode { + // SAFETY: The caller must ensure that the `index` is valid. + let node = unsafe { self.node_unchecked_mut(index) }; + + if let NodeInner::Task(task_node) = &mut node.inner { + task_node + } else { + // SAFETY: The caller must ensure that the node is a `TaskNode`. + unsafe { hint::unreachable_unchecked() } + } + } + + fn node(&self, id: NodeId) -> Result<&Node> { + // SAFETY: We never modify the map concurrently. + unsafe { self.inner.get_unprotected(id.slot) }.ok_or(TaskGraphError::InvalidNode) + } + + unsafe fn node_unchecked(&self, index: NodeIndex) -> &Node { + // SAFETY: + // * The caller must ensure that the `index` is valid. + // * We never modify the map concurrently. + unsafe { self.inner.index_unchecked_unprotected(index) } + } + + fn node_mut(&mut self, id: NodeId) -> Result<&mut Node> { + self.inner + .get_mut(id.slot) + .ok_or(TaskGraphError::InvalidNode) + } + + unsafe fn node_unchecked_mut(&mut self, index: NodeIndex) -> &mut Node { + // SAFETY: The caller must ensure that the `index` is valid. + unsafe { self.inner.index_unchecked_mut(index) } + } + + fn node_many_mut(&mut self, ids: [NodeId; N]) -> Result<[&mut Node; N]> { + union Transmute { + src: [NodeId; N], + dst: [SlotId; N], + } + + // HACK: `transmute_unchecked` is not exposed even unstably at the moment, and the compiler + // isn't currently smart enough to figure this out using `transmute`. + // + // SAFETY: `NodeId` is `#[repr(transparent)]` over `SlotId` and both arrays have the same + // length. 
+ let ids = unsafe { Transmute { src: ids }.dst }; + + self.inner + .get_many_mut(ids) + .ok_or(TaskGraphError::InvalidNode) + } + + fn nodes(&self) -> IterUnprotected<'_, Node> { + // SAFETY: We never modify the map concurrently. + unsafe { self.inner.iter_unprotected() } + } + + fn nodes_mut(&mut self) -> IterMut<'_, Node> { + self.inner.iter_mut() + } +} + +impl Resources { + fn add_buffer(&mut self, create_info: &BufferCreateInfo) -> Id { + let resource_info = ResourceInfo::Buffer(BufferInfo { + size: create_info.size, + }); + let mut tag = BUFFER_TAG | VIRTUAL_BIT; + + if create_info.sharing.is_exclusive() { + tag |= EXCLUSIVE_BIT; + } + + let slot = self.inner.insert_with_tag_mut(resource_info, tag); + + Id::new(slot) + } + + fn add_image(&mut self, create_info: &ImageCreateInfo) -> Id { + let resource_info = ResourceInfo::Image(ImageInfo { + flags: create_info.flags, + format: create_info.format, + array_layers: create_info.array_layers, + mip_levels: create_info.mip_levels, + }); + let mut tag = IMAGE_TAG | VIRTUAL_BIT; + + if create_info.sharing.is_exclusive() { + tag |= EXCLUSIVE_BIT; + } + + let slot = self.inner.insert_with_tag_mut(resource_info, tag); + + Id::new(slot) + } + + fn add_swapchain(&mut self, create_info: &SwapchainCreateInfo) -> Id { + let resource_info = ResourceInfo::Swapchain(SwapchainInfo { + image_array_layers: create_info.image_array_layers, + }); + let tag = SWAPCHAIN_TAG | VIRTUAL_BIT; + + let slot = self.inner.insert_with_tag_mut(resource_info, tag); + + Id::new(slot) + } + + fn capacity(&self) -> u32 { + self.inner.capacity() + } + + fn len(&self) -> u32 { + self.inner.len() + } + + fn buffer(&self, id: Id) -> Result<&BufferInfo, InvalidSlotError> { + // SAFETY: We never modify the map concurrently. 
+ let resource_info = + unsafe { self.inner.get_unprotected(id.slot) }.ok_or(InvalidSlotError::new(id))?; + + if let ResourceInfo::Buffer(buffer) = resource_info { + Ok(buffer) + } else { + // SAFETY: The `get_unprotected` call above already successfully compared the tag, so + // there is no need to check it again. We always ensure that buffer IDs get tagged with + // the `BUFFER_TAG`. + unsafe { hint::unreachable_unchecked() } + } + } + + unsafe fn buffer_unchecked(&self, id: Id) -> &BufferInfo { + // SAFETY: + // * The caller must ensure that the `id` is valid. + // * We never modify the map concurrently. + let resource_info = unsafe { self.inner.index_unchecked_unprotected(id.index()) }; + + if let ResourceInfo::Buffer(buffer) = resource_info { + buffer + } else { + // SAFETY: The caller must ensure that the `id` is valid. + unsafe { hint::unreachable_unchecked() } + } + } + + fn image(&self, id: Id) -> Result<&ImageInfo, InvalidSlotError> { + // SAFETY: We never modify the map concurrently. + let resource_info = + unsafe { self.inner.get_unprotected(id.slot) }.ok_or(InvalidSlotError::new(id))?; + + if let ResourceInfo::Image(image) = resource_info { + Ok(image) + } else { + // SAFETY: The `get_unprotected` call above already successfully compared the tag, so + // there is no need to check it again. We always ensure that image IDs get tagged with + // the `IMAGE_TAG`. + unsafe { hint::unreachable_unchecked() } + } + } + + unsafe fn image_unchecked(&self, id: Id) -> &ImageInfo { + // SAFETY: + // * The caller must ensure that the `index` is valid. + // * We never modify the map concurrently. + let resource_info = unsafe { self.inner.index_unchecked_unprotected(id.index()) }; + + if let ResourceInfo::Image(image) = resource_info { + image + } else { + // SAFETY: The caller must ensure that the `id` is valid. 
+ unsafe { hint::unreachable_unchecked() } + } + } + + fn swapchain(&self, id: Id) -> Result<&SwapchainInfo, InvalidSlotError> { + // SAFETY: We never modify the map concurrently. + let resource_info = + unsafe { self.inner.get_unprotected(id.slot) }.ok_or(InvalidSlotError::new(id))?; + + if let ResourceInfo::Swapchain(swapchain) = resource_info { + Ok(swapchain) + } else { + // SAFETY: The `get_unprotected` call above already successfully compared the tag, so + // there is no need to check it again. We always ensure that swapchain IDs get tagged + // with the `SWAPCHAIN_TAG`. + unsafe { hint::unreachable_unchecked() } + } + } + + unsafe fn swapchain_unchecked(&self, id: Id) -> &SwapchainInfo { + // SAFETY: + // * The caller must ensure that the `index` is valid. + // * We never modify the map concurrently. + let resource_info = unsafe { self.inner.index_unchecked_unprotected(id.index()) }; + + if let ResourceInfo::Swapchain(swapchain) = resource_info { + swapchain + } else { + // SAFETY: The caller must ensure that the `id` is valid. + unsafe { hint::unreachable_unchecked() } + } + } + + fn iter(&self) -> IterUnprotected<'_, ResourceInfo> { + // SAFETY: We never modify the map concurrently. + unsafe { self.inner.iter_unprotected() } + } +} + +/// The ID type used to refer to a node within a [`TaskGraph`]. +#[derive(Clone, Copy, PartialEq, Eq, Hash)] +#[repr(transparent)] +pub struct NodeId { + slot: SlotId, +} + +impl NodeId { + fn index(self) -> NodeIndex { + self.slot.index() + } +} + +impl fmt::Debug for NodeId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NodeId") + .field("index", &self.slot.index()) + .field("generation", &self.slot.generation()) + .finish() + } +} + +/// A node within a [`TaskGraph`] that represents a [`Task`] to be executed along with its resource +/// accesses. 
+pub struct TaskNode { + accesses: ResourceAccesses, + queue_family_type: QueueFamilyType, + queue_family_index: u32, + dependency_level_index: u32, + task: Box>, +} + +pub(crate) struct ResourceAccesses { + inner: Vec, +} + +// TODO: Literally anything else +#[derive(Clone)] +enum ResourceAccess { + Buffer(BufferAccess), + Image(ImageAccess), + Swapchain(SwapchainAccess), +} + +#[derive(Clone)] +struct BufferAccess { + id: Id, + range: BufferRange, + access_type: AccessType, +} + +#[derive(Clone)] +struct ImageAccess { + id: Id, + subresource_range: ImageSubresourceRange, + access_type: AccessType, + layout_type: ImageLayoutType, +} + +#[derive(Clone)] +struct SwapchainAccess { + id: Id, + array_layers: Range, + access_type: AccessType, + layout_type: ImageLayoutType, +} + +impl TaskNode { + fn new(queue_family_type: QueueFamilyType, task: impl Task) -> Self { + TaskNode { + accesses: ResourceAccesses { inner: Vec::new() }, + queue_family_type, + queue_family_index: 0, + dependency_level_index: 0, + task: Box::new(task), + } + } + + /// Returns the queue family type the task node was created with. + #[inline] + #[must_use] + pub fn queue_family_type(&self) -> QueueFamilyType { + self.queue_family_type + } + + /// Returns a reference to the task the task node was created with. + #[inline] + #[must_use] + pub fn task(&self) -> &dyn Task { + &*self.task + } + + /// Returns a mutable reference to the task the task node was created with. + #[inline] + #[must_use] + pub fn task_mut(&mut self) -> &mut dyn Task { + &mut *self.task + } + + /// Returns `true` if the task node has access of the given `access_type` to the buffer + /// corresponding to `id` where the given `range` is contained within the access's range. 
+ #[inline] + #[must_use] + pub fn contains_buffer_access( + &self, + id: Id, + range: BufferRange, + access_type: AccessType, + ) -> bool { + self.accesses.contains_buffer_access(id, range, access_type) + } + + /// Returns `true` if the task node has access of the given `access_type` and `layout_type` to + /// the image corresponding to `id` where the given `subresource_range` is contained within + /// the access's subresource range. + #[inline] + #[must_use] + pub fn contains_image_access( + &self, + id: Id, + subresource_range: ImageSubresourceRange, + access_type: AccessType, + layout_type: ImageLayoutType, + ) -> bool { + self.accesses + .contains_image_access(id, subresource_range, access_type, layout_type) + } + + /// Returns `true` if the task node has access of the given `access_type` and `layout_type` to + /// the swapchain corresponding to `id` where the given `array_layers` are contained within + /// the access's array layers. + #[inline] + #[must_use] + pub fn contains_swapchain_access( + &self, + id: Id, + array_layers: Range, + access_type: AccessType, + layout_type: ImageLayoutType, + ) -> bool { + self.accesses + .contains_swapchain_access(id, array_layers, access_type, layout_type) + } +} + +impl ResourceAccesses { + pub(crate) fn contains_buffer_access( + &self, + id: Id, + range: BufferRange, + access_type: AccessType, + ) -> bool { + debug_assert!(!range.is_empty()); + + self.inner.iter().any(|resource_access| { + matches!(resource_access, ResourceAccess::Buffer(a) if a.id == id + && a.access_type == access_type + && a.range.start <= range.start + && range.end <= a.range.end) + }) + } + + pub(crate) fn contains_image_access( + &self, + id: Id, + subresource_range: ImageSubresourceRange, + access_type: AccessType, + layout_type: ImageLayoutType, + ) -> bool { + debug_assert!(!subresource_range.aspects.is_empty()); + debug_assert!(!subresource_range.mip_levels.is_empty()); + debug_assert!(!subresource_range.array_layers.is_empty()); + + 
self.inner.iter().any(|resource_access| { + matches!(resource_access, ResourceAccess::Image(a) if a.id == id + && a.access_type == access_type + && a.layout_type == layout_type + && a.subresource_range.aspects.contains(subresource_range.aspects) + && a.subresource_range.mip_levels.start <= subresource_range.mip_levels.start + && subresource_range.mip_levels.end <= a.subresource_range.mip_levels.end + && a.subresource_range.array_layers.start <= subresource_range.array_layers.start + && subresource_range.array_layers.end <= a.subresource_range.array_layers.end) + }) + } + + pub(crate) fn contains_swapchain_access( + &self, + id: Id, + array_layers: Range, + access_type: AccessType, + layout_type: ImageLayoutType, + ) -> bool { + debug_assert!(!array_layers.is_empty()); + + self.inner.iter().any(|resource_access| { + matches!(resource_access, ResourceAccess::Swapchain(a) if a.id == id + && a.access_type == access_type + && a.layout_type == layout_type + && a.array_layers.start <= array_layers.start + && array_layers.end <= a.array_layers.end) + }) + } +} + +/// A builder used to add resource accesses to a [`TaskNode`]. +pub struct TaskNodeBuilder<'a, W: ?Sized> { + id: NodeId, + task_node: &'a mut TaskNode, + resources: &'a Resources, +} + +impl TaskNodeBuilder<'_, W> { + /// Adds a buffer access to this task node. + /// + /// # Panics + /// + /// - Panics if `id` is not a valid virtual resource ID. + /// - Panics if `range` doesn't denote a valid range of the buffer. + /// - Panics if `access_type` isn't a valid buffer access type. + pub fn buffer_access( + &mut self, + id: Id, + range: BufferRange, + access_type: AccessType, + ) -> &mut Self { + let buffer = self.resources.buffer(id).expect("invalid buffer"); + + assert!(range.end <= buffer.size); + assert!(!range.is_empty()); + + assert!(access_type.is_valid_buffer_access_type()); + + // SAFETY: We checked the safety preconditions above. 
+ unsafe { self.buffer_access_unchecked(id, range, access_type) } + } + + /// Adds a buffer access to this task node without doing any checks. + /// + /// # Safety + /// + /// - `id` must be a valid virtual resource ID. + /// - `range` must denote a valid range of the buffer. + /// - `access_type` must be a valid buffer access type. + #[inline] + pub unsafe fn buffer_access_unchecked( + &mut self, + id: Id, + range: BufferRange, + access_type: AccessType, + ) -> &mut Self { + self.task_node + .accesses + .inner + .push(ResourceAccess::Buffer(BufferAccess { + id, + range, + access_type, + })); + + self + } + + /// Adds an image access to this task node. + /// + /// # Panics + /// + /// - Panics if `id` is not a valid virtual resource ID. + /// - Panics if `subresource_range` doesn't denote a valid subresource range of the image. + /// - Panics if `access_type` isn't a valid image access type. + pub fn image_access( + &mut self, + id: Id, + mut subresource_range: ImageSubresourceRange, + access_type: AccessType, + layout_type: ImageLayoutType, + ) -> &mut Self { + let image = self.resources.image(id).expect("invalid image"); + + if image.flags.contains(ImageCreateFlags::DISJOINT) { + subresource_range.aspects -= ImageAspects::COLOR; + subresource_range.aspects |= match image.format.planes().len() { + 2 => ImageAspects::PLANE_0 | ImageAspects::PLANE_1, + 3 => ImageAspects::PLANE_0 | ImageAspects::PLANE_1 | ImageAspects::PLANE_2, + _ => unreachable!(), + }; + } + + assert!(image.format.aspects().contains(subresource_range.aspects)); + assert!(subresource_range.mip_levels.end <= image.mip_levels); + assert!(subresource_range.array_layers.end <= image.array_layers); + assert!(!subresource_range.aspects.is_empty()); + assert!(!subresource_range.mip_levels.is_empty()); + assert!(!subresource_range.array_layers.is_empty()); + + assert!(access_type.is_valid_image_access_type()); + + // SAFETY: We checked the safety preconditions above. 
+ unsafe { self.image_access_unchecked(id, subresource_range, access_type, layout_type) } + } + + /// Adds an image access to this task node without doing any checks. + /// + /// # Safety + /// + /// - `id` must be a valid virtual resource ID. + /// - `subresource_range` must denote a valid subresource range of the image. If the image + /// flags contain `ImageCreateFlags::DISJOINT`, then the color aspect is not considered + /// valid. + /// - `access_type` must be a valid image access type. + #[inline] + pub unsafe fn image_access_unchecked( + &mut self, + id: Id, + mut subresource_range: ImageSubresourceRange, + access_type: AccessType, + mut layout_type: ImageLayoutType, + ) -> &mut Self { + // Normalize the layout type so that comparisons of accesses are predictable. + if access_type.image_layout() == ImageLayout::General { + layout_type = ImageLayoutType::Optimal; + } + + self.task_node + .accesses + .inner + .push(ResourceAccess::Image(ImageAccess { + id, + subresource_range, + access_type, + layout_type, + })); + + self + } + + /// Adds a swapchain image access to this task node. + /// + /// # Panics + /// + /// - Panics if `id` is not a valid virtual resource ID. + /// - Panics if `array_layers` doesn't denote a valid range of array layers of the swapchain. + /// - Panics if `access_type` isn't a valid image access type. + pub fn swapchain_access( + &mut self, + id: Id, + array_layers: Range, + access_type: AccessType, + layout_type: ImageLayoutType, + ) -> &mut Self { + let swapchain = self.resources.swapchain(id).expect("invalid swapchain"); + + assert!(array_layers.end <= swapchain.image_array_layers); + assert!(!array_layers.is_empty()); + + assert!(access_type.is_valid_image_access_type()); + + // SAFETY: We checked the safety preconditions above. + unsafe { self.swapchain_access_unchecked(id, array_layers, access_type, layout_type) } + } + + /// Adds a swapchain image access to this task node without doing any checks. 
+ /// + /// # Safety + /// + /// - `id` must be a valid virtual resource ID. + /// - `array_layers` must denote a valid range of array layers of the swapchain. + /// - `access_type` must be a valid image access type. + #[inline] + pub unsafe fn swapchain_access_unchecked( + &mut self, + id: Id, + array_layers: Range, + access_type: AccessType, + mut layout_type: ImageLayoutType, + ) -> &mut Self { + // Normalize the layout type so that comparisons of accesses are predictable. + if access_type.image_layout() == ImageLayout::General { + layout_type = ImageLayoutType::Optimal; + } + + self.task_node + .accesses + .inner + .push(ResourceAccess::Swapchain(SwapchainAccess { + id, + access_type, + layout_type, + array_layers, + })); + + self + } + + /// Finishes building the task node and returns the ID of the built node. + #[inline] + pub fn build(self) -> NodeId { + self.id + } +} + +/// A [`TaskGraph`] that has been compiled into an executable form. +pub struct ExecutableTaskGraph { + graph: TaskGraph, + instructions: Vec, + submissions: Vec, + buffer_barriers: Vec, + image_barriers: Vec, + semaphores: Vec, + swapchains: SmallVec<[Id; 1]>, + present_queue: Option>, +} + +// FIXME: Initial queue family ownership transfers +struct Submission { + queue: Arc, + initial_buffer_barrier_range: Range, + initial_image_barrier_range: Range, + instruction_range: Range, +} + +type InstructionIndex = usize; + +#[derive(Clone)] +enum Instruction { + WaitAcquire { + swapchain_id: Id, + stage_mask: PipelineStages, + }, + WaitSemaphore { + semaphore_index: SemaphoreIndex, + stage_mask: PipelineStages, + }, + ExecuteTask { + node_index: NodeIndex, + }, + PipelineBarrier { + buffer_barrier_range: Range, + image_barrier_range: Range, + }, + // TODO: + // SetEvent { + // event_index: EventIndex, + // buffer_barriers: Range, + // image_barriers: Range, + // }, + // WaitEvent { + // event_index: EventIndex, + // buffer_barriers: Range, + // image_barriers: Range, + // }, + SignalSemaphore { 
+ semaphore_index: SemaphoreIndex, + stage_mask: PipelineStages, + }, + SignalPresent { + swapchain_id: Id, + stage_mask: PipelineStages, + }, + FlushSubmit, + Submit, +} + +type SemaphoreIndex = usize; + +type BarrierIndex = u32; + +struct BufferMemoryBarrier { + src_stage_mask: PipelineStages, + src_access_mask: AccessFlags, + dst_stage_mask: PipelineStages, + dst_access_mask: AccessFlags, + src_queue_family_index: u32, + dst_queue_family_index: u32, + buffer: Id, + range: BufferRange, +} + +struct ImageMemoryBarrier { + src_stage_mask: PipelineStages, + src_access_mask: AccessFlags, + dst_stage_mask: PipelineStages, + dst_access_mask: AccessFlags, + old_layout: ImageLayout, + new_layout: ImageLayout, + src_queue_family_index: u32, + dst_queue_family_index: u32, + image: ImageReference, + subresource_range: ImageSubresourceRange, +} + +// TODO: This really ought not to be necessary. +#[derive(Clone, Copy)] +enum ImageReference { + Normal(Id), + Swapchain(Id), +} + +impl ExecutableTaskGraph { + /// Returns a reference to the task node corresponding to `id`. + #[inline] + pub fn task_node(&self, id: NodeId) -> Result<&TaskNode> { + self.graph.task_node(id) + } + + /// Returns a mutable reference to the task node corresponding to `id`. + #[inline] + pub fn task_node_mut(&mut self, id: NodeId) -> Result<&mut TaskNode> { + self.graph.task_node_mut(id) + } + + /// Returns an iterator over all [`TaskNode`]s. + #[inline] + pub fn task_nodes(&self) -> TaskNodes<'_, W> { + self.graph.task_nodes() + } + + /// Returns an iterator over all [`TaskNode`]s that allows you to mutate them. + #[inline] + pub fn task_nodes_mut(&mut self) -> TaskNodesMut<'_, W> { + self.graph.task_nodes_mut() + } +} + +unsafe impl DeviceOwned for ExecutableTaskGraph { + #[inline] + fn device(&self) -> &Arc { + self.submissions[0].queue.device() + } +} + +/// An iterator over all [`TaskNode`]s within a [`TaskGraph`]. +/// +/// This type is created by the [`task_nodes`] method on [`TaskGraph`]. 
+/// +/// [`task_nodes`]: TaskGraph::task_nodes +pub struct TaskNodes<'a, W: ?Sized> { + inner: concurrent_slotmap::IterUnprotected<'a, Node>, +} + +impl fmt::Debug for TaskNodes<'_, W> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskNodes").finish_non_exhaustive() + } +} + +impl<'a, W: ?Sized> Iterator for TaskNodes<'a, W> { + type Item = &'a TaskNode; + + #[inline] + fn next(&mut self) -> Option { + loop { + let (_, node) = self.inner.next()?; + + if let NodeInner::Task(task_node) = &node.inner { + break Some(task_node); + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl DoubleEndedIterator for TaskNodes<'_, W> { + #[inline] + fn next_back(&mut self) -> Option { + loop { + let (_, node) = self.inner.next_back()?; + + if let NodeInner::Task(task_node) = &node.inner { + break Some(task_node); + } + } + } +} + +impl FusedIterator for TaskNodes<'_, W> {} + +/// An iterator over all [`TaskNode`]s within a [`TaskGraph`] that allows you to mutate them. +/// +/// This type is created by the [`task_nodes_mut`] method on [`TaskGraph`]. 
+/// +/// [`task_nodes_mut`]: TaskGraph::task_nodes_mut +pub struct TaskNodesMut<'a, W: ?Sized> { + inner: concurrent_slotmap::IterMut<'a, Node>, +} + +impl fmt::Debug for TaskNodesMut<'_, W> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaskNodesMut").finish_non_exhaustive() + } +} + +impl<'a, W: ?Sized> Iterator for TaskNodesMut<'a, W> { + type Item = &'a mut TaskNode; + + #[inline] + fn next(&mut self) -> Option { + loop { + let (_, node) = self.inner.next()?; + + if let NodeInner::Task(task_node) = &mut node.inner { + break Some(task_node); + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.inner.size_hint() + } +} + +impl DoubleEndedIterator for TaskNodesMut<'_, W> { + #[inline] + fn next_back(&mut self) -> Option { + loop { + let (_, node) = self.inner.next_back()?; + + if let NodeInner::Task(task_node) = &mut node.inner { + break Some(task_node); + } + } + } +} + +impl FusedIterator for TaskNodesMut<'_, W> {} + +type Result = ::std::result::Result; + +/// Error that can happen when doing operations on a [`TaskGraph`]. 
+#[derive(Debug, PartialEq, Eq)] +pub enum TaskGraphError { + InvalidNode, + InvalidNodeType, + InvalidEdge, + DuplicateEdge, +} + +impl fmt::Display for TaskGraphError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let msg = match self { + Self::InvalidNode => "a node with the given ID does not exist", + Self::InvalidNodeType => { + "the node with the given ID has a type that is incompatible with the operation" + } + Self::InvalidEdge => "an edge between the given nodes does not exist", + Self::DuplicateEdge => "an edge between the given nodes already exists", + }; + + f.write_str(msg) + } +} + +impl Error for TaskGraphError {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{TaskContext, TaskResult}; + + struct DummyTask; + + impl Task for DummyTask { + type World = (); + + unsafe fn execute(&self, _tcx: &mut TaskContext<'_>, _world: &Self::World) -> TaskResult { + Ok(()) + } + } + + #[test] + fn basic_usage1() { + let mut graph = TaskGraph::new(10, 0); + + let x = graph + .create_task_node("", QueueFamilyType::Graphics, DummyTask) + .build(); + let y = graph + .create_task_node("", QueueFamilyType::Graphics, DummyTask) + .build(); + + graph.add_edge(x, y).unwrap(); + assert!(graph.nodes.node(x).unwrap().out_edges.contains(&y.index())); + assert!(graph.nodes.node(y).unwrap().in_edges.contains(&x.index())); + assert_eq!(graph.add_edge(x, y), Err(TaskGraphError::DuplicateEdge)); + + graph.remove_edge(x, y).unwrap(); + assert!(!graph.nodes.node(x).unwrap().out_edges.contains(&y.index())); + assert!(!graph.nodes.node(y).unwrap().in_edges.contains(&x.index())); + + assert_eq!(graph.remove_edge(x, y), Err(TaskGraphError::InvalidEdge)); + + graph.add_edge(y, x).unwrap(); + assert!(graph.nodes.node(y).unwrap().out_edges.contains(&x.index())); + assert!(graph.nodes.node(x).unwrap().in_edges.contains(&y.index())); + assert_eq!(graph.add_edge(y, x), Err(TaskGraphError::DuplicateEdge)); + + graph.remove_edge(y, x).unwrap(); + 
assert!(!graph.nodes.node(y).unwrap().out_edges.contains(&x.index())); + assert!(!graph.nodes.node(x).unwrap().in_edges.contains(&y.index())); + + assert_eq!(graph.remove_edge(y, x), Err(TaskGraphError::InvalidEdge)); + } + + #[test] + fn basic_usage2() { + let mut graph = TaskGraph::new(10, 0); + + let x = graph + .create_task_node("", QueueFamilyType::Graphics, DummyTask) + .build(); + let y = graph + .create_task_node("", QueueFamilyType::Graphics, DummyTask) + .build(); + let z = graph + .create_task_node("", QueueFamilyType::Graphics, DummyTask) + .build(); + + assert!(graph.task_node(x).is_ok()); + assert!(graph.task_node(y).is_ok()); + assert!(graph.task_node(z).is_ok()); + assert!(graph.task_node_mut(x).is_ok()); + assert!(graph.task_node_mut(y).is_ok()); + assert!(graph.task_node_mut(z).is_ok()); + + graph.add_edge(x, y).unwrap(); + graph.add_edge(y, z).unwrap(); + assert!(graph.nodes.node(x).unwrap().out_edges.contains(&y.index())); + assert!(graph.nodes.node(z).unwrap().in_edges.contains(&y.index())); + + graph.remove_task_node(y).unwrap(); + assert!(!graph.nodes.node(x).unwrap().out_edges.contains(&y.index())); + assert!(!graph.nodes.node(z).unwrap().in_edges.contains(&y.index())); + + assert!(matches!( + graph.remove_task_node(y), + Err(TaskGraphError::InvalidNode), + )); + } + + #[test] + fn self_referential_node() { + let mut graph = TaskGraph::new(10, 0); + + let x = graph + .create_task_node("", QueueFamilyType::Graphics, DummyTask) + .build(); + + assert_eq!(graph.add_edge(x, x), Err(TaskGraphError::InvalidNode)); + } +} diff --git a/vulkano-taskgraph/src/lib.rs b/vulkano-taskgraph/src/lib.rs index 98322109..30402e42 100644 --- a/vulkano-taskgraph/src/lib.rs +++ b/vulkano-taskgraph/src/lib.rs @@ -3,7 +3,10 @@ #![forbid(unsafe_op_in_unsafe_fn)] use concurrent_slotmap::SlotId; -use resource::{BufferRange, BufferState, DeathRow, ImageState, Resources, SwapchainState}; +use graph::ResourceAccesses; +use resource::{ + AccessType, BufferRange, 
BufferState, DeathRow, ImageState, Resources, SwapchainState, +}; use std::{ any::{Any, TypeId}, cell::Cell, @@ -27,6 +30,7 @@ use vulkano::{ DeviceSize, ValidationError, VulkanError, }; +pub mod graph; pub mod resource; /// A task represents a unit of work to be recorded to a command buffer. @@ -117,6 +121,7 @@ pub struct TaskContext<'a> { death_row: Cell>, current_command_buffer: Cell>, command_buffers: Cell>>, + accesses: &'a ResourceAccesses, } impl<'a> TaskContext<'a> { @@ -246,6 +251,7 @@ impl<'a> TaskContext<'a> { #[cold] unsafe fn invalidate_subbuffer( tcx: &TaskContext<'_>, + id: Id, subbuffer: &Subbuffer<[u8]>, allocation: &ResourceMemory, atom_size: DeviceAlignment, @@ -259,7 +265,7 @@ impl<'a> TaskContext<'a> { ); let range = Range { start, end }; - tcx.validate_read_buffer(subbuffer.buffer(), range.clone())?; + tcx.validate_read_buffer(id, range.clone())?; let memory_range = MappedMemoryRange { offset: range.start, @@ -310,10 +316,10 @@ impl<'a> TaskContext<'a> { // SAFETY: // `subbuffer.mapped_slice()` didn't return an error, which means that the subbuffer // falls within the mapped range of the memory. 
-            unsafe { invalidate_subbuffer(self, subbuffer.as_bytes(), allocation, atom_size) }?;
+            unsafe { invalidate_subbuffer(self, id, subbuffer.as_bytes(), allocation, atom_size) }?;
         } else {
             let range = subbuffer.offset()..subbuffer.offset() + subbuffer.size();
-            self.validate_write_buffer(buffer, range)?;
+            self.validate_write_buffer(id, range)?;
         }
 
         // SAFETY: We checked that the task has read access to the subbuffer above, which also
@@ -325,8 +331,25 @@ impl<'a> TaskContext<'a> {
         Ok(BufferReadGuard { data })
     }
 
-    fn validate_read_buffer(&self, _buffer: &Buffer, _range: BufferRange) -> TaskResult {
-        todo!()
+    fn validate_read_buffer(
+        &self,
+        id: Id<Buffer>,
+        range: BufferRange,
+    ) -> Result<(), Box<ValidationError>> {
+        if !self
+            .accesses
+            .contains_buffer_access(id, range, AccessType::HostRead)
+        {
+            return Err(Box::new(ValidationError {
+                context: "TaskContext::read_buffer".into(),
+                problem: "the task node does not have an access of type `AccessType::HostRead` \
+                    for the range of the buffer"
+                    .into(),
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
     }
 
     /// Gets read access to a portion of the buffer corresponding to `id` without checking if this
@@ -429,6 +452,7 @@ impl<'a> TaskContext<'a> {
         #[cold]
         unsafe fn invalidate_subbuffer(
             tcx: &TaskContext<'_>,
+            id: Id<Buffer>,
             subbuffer: &Subbuffer<[u8]>,
             allocation: &ResourceMemory,
             atom_size: DeviceAlignment,
@@ -442,7 +466,7 @@ impl<'a> TaskContext<'a> {
             );
             let range = Range { start, end };
 
-            tcx.validate_write_buffer(subbuffer.buffer(), range.clone())?;
+            tcx.validate_write_buffer(id, range.clone())?;
 
             let memory_range = MappedMemoryRange {
                 offset: range.start,
@@ -493,10 +517,10 @@ impl<'a> TaskContext<'a> {
             // SAFETY:
             // `subbuffer.mapped_slice()` didn't return an error, which means that the subbuffer
             // falls within the mapped range of the memory.
- unsafe { invalidate_subbuffer(self, subbuffer.as_bytes(), allocation, atom_size) }?; + unsafe { invalidate_subbuffer(self, id, subbuffer.as_bytes(), allocation, atom_size) }?; } else { let range = subbuffer.offset()..subbuffer.offset() + subbuffer.size(); - self.validate_write_buffer(buffer, range)?; + self.validate_write_buffer(id, range)?; } // SAFETY: We checked that the task has write access to the subbuffer above, which also @@ -512,8 +536,25 @@ impl<'a> TaskContext<'a> { }) } - fn validate_write_buffer(&self, _buffer: &Buffer, _range: BufferRange) -> TaskResult { - todo!() + fn validate_write_buffer( + &self, + id: Id, + range: BufferRange, + ) -> Result<(), Box> { + if !self + .accesses + .contains_buffer_access(id, range, AccessType::HostWrite) + { + return Err(Box::new(ValidationError { + context: "TaskContext::write_buffer".into(), + problem: "the task node does not have an access of type `AccessType::HostWrite` \ + for the range of the buffer" + .into(), + ..Default::default() + })); + } + + Ok(()) } /// Gets write access to a portion of the buffer corresponding to `id` without checking if this @@ -768,7 +809,7 @@ impl InvalidSlotError { impl fmt::Display for InvalidSlotError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let &InvalidSlotError { slot } = self; - let object_type = match slot.tag() { + let object_type = match slot.tag() & OBJECT_TYPE_MASK { 0 => ObjectType::Buffer, 1 => ObjectType::Image, 2 => ObjectType::Swapchain, @@ -860,6 +901,10 @@ impl Id { marker: PhantomData, } } + + fn index(self) -> u32 { + self.slot.index() + } } impl Clone for Id { @@ -874,8 +919,8 @@ impl Copy for Id {} impl fmt::Debug for Id { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Id") - .field("generation", &self.slot.generation()) .field("index", &self.slot.index()) + .field("generation", &self.slot.generation()) .finish() } } @@ -940,6 +985,13 @@ enum ObjectType { Flight = 3, } +const BUFFER_TAG: u32 = ObjectType::Buffer 
as u32; +const IMAGE_TAG: u32 = ObjectType::Image as u32; +const SWAPCHAIN_TAG: u32 = ObjectType::Swapchain as u32; +const FLIGHT_TAG: u32 = ObjectType::Flight as u32; + +const OBJECT_TYPE_MASK: u32 = 0b11; + // SAFETY: ZSTs can always be safely produced out of thin air, barring any safety invariants they // might impose, which in the case of `NonExhaustive` are none. const NE: vulkano::NonExhaustive = diff --git a/vulkano-taskgraph/src/resource.rs b/vulkano-taskgraph/src/resource.rs index c0e1c200..0e22597d 100644 --- a/vulkano-taskgraph/src/resource.rs +++ b/vulkano-taskgraph/src/resource.rs @@ -1,6 +1,6 @@ //! Synchronization state tracking of all resources. -use crate::{Id, InvalidSlotError, ObjectType, Ref}; +use crate::{Id, InvalidSlotError, Ref, BUFFER_TAG, FLIGHT_TAG, IMAGE_TAG, SWAPCHAIN_TAG}; use ash::vk; use concurrent_slotmap::{epoch, SlotMap}; use parking_lot::{Mutex, MutexGuard}; @@ -38,11 +38,6 @@ use vulkano::{ static REGISTERED_DEVICES: Mutex> = Mutex::new(Vec::new()); -const BUFFER_TAG: u32 = ObjectType::Buffer as u32; -const IMAGE_TAG: u32 = ObjectType::Image as u32; -const SWAPCHAIN_TAG: u32 = ObjectType::Swapchain as u32; -const FLIGHT_TAG: u32 = ObjectType::Flight as u32; - /// Tracks the synchronization state of all resources. /// /// There can only exist one `Resources` collection per device, because there must only be one @@ -117,7 +112,6 @@ pub struct Flight { #[derive(Debug)] pub(crate) struct FlightState { - pub(crate) swapchains: SmallVec<[Id; 1]>, pub(crate) death_rows: SmallVec<[DeathRow; 3]>, } @@ -214,6 +208,7 @@ impl Resources { /// # Panics /// /// - Panics if the instance of `surface` is not the same as that of `self.device()`. + /// - Panics if `flight_id` is invalid. /// - Panics if `create_info.min_image_count` is not greater than or equal to the number of /// [frames] of the flight corresponding to `flight_id`. 
/// @@ -276,7 +271,6 @@ impl Resources { current_frame: AtomicU32::new(0), fences, state: Mutex::new(FlightState { - swapchains: SmallVec::new(), death_rows: (0..frame_count.get()).map(|_| Vec::new()).collect(), }), }; @@ -462,30 +456,11 @@ impl Resources { last_accesses: Mutex::new(RangeMap::new()), }; - unsafe { - state.set_access( - ImageSubresourceRange { - aspects: ImageAspects::COLOR, - mip_levels: 0..1, - array_layers: 0..state.swapchain.image_array_layers(), - }, - ImageAccess::NONE, - ); - } + unsafe { state.set_access(0..state.swapchain.image_array_layers(), ImageAccess::NONE) }; let slot = self.swapchains.insert_with_tag(state, SWAPCHAIN_TAG, guard); - let id = Id::new(slot); - self.flights - .get(flight_id.slot, guard) - .unwrap() - .state - // FIXME: - .lock() - .swapchains - .push(id); - - Ok(id) + Ok(Id::new(slot)) } /// Removes the buffer corresponding to `id`. @@ -524,20 +499,10 @@ impl Resources { /// pending command buffer, and if it is used in any command buffer that's in the executable /// or recording state, that command buffer must never be executed. pub unsafe fn remove_swapchain(&self, id: Id) -> Result> { - let state = self - .swapchains + self.swapchains .remove(id.slot, self.pin()) .map(Ref) - .ok_or(InvalidSlotError::new(id))?; - let flight_id = state.flight_id; - - let flight = self.flights.get(flight_id.slot, self.pin()).unwrap(); - // FIXME: - let swapchains = &mut flight.state.lock().swapchains; - let index = swapchains.iter().position(|&x| x == id).unwrap(); - swapchains.remove(index); - - Ok(state) + .ok_or(InvalidSlotError::new(id)) } /// Returns the buffer corresponding to `id`. @@ -659,7 +624,7 @@ impl BufferState { assert!(!range.is_empty()); BufferAccesses { - inner: MutexGuard::leak(self.last_accesses.lock()).overlapping(range), + overlapping: MutexGuard::leak(self.last_accesses.lock()).overlapping(range), // SAFETY: We locked the mutex above. 
             _guard: unsafe { AccessesGuard::new(&self.last_accesses) },
         }
     }
@@ -730,14 +695,14 @@ impl ImageState {
     #[inline]
     pub fn accesses(&self, subresource_range: ImageSubresourceRange) -> ImageAccesses<'_> {
         let subresource_ranges = SubresourceRanges::from_image(&self.image, subresource_range);
-        let map = MutexGuard::leak(self.last_accesses.lock());
+        let last_accesses = MutexGuard::leak(self.last_accesses.lock());
 
         ImageAccesses {
             mip_levels: self.image.mip_levels(),
             array_layers: self.image.array_layers(),
             subresource_ranges,
-            overlapping: map.overlapping(0..0),
-            map,
+            overlapping: last_accesses.overlapping(0..0),
+            last_accesses,
             // SAFETY: We locked the mutex above.
             _guard: unsafe { AccessesGuard::new(&self.last_accesses) },
         }
@@ -862,31 +827,33 @@ impl SwapchainState {
         &self.images[self.current_image_index.load(Ordering::Relaxed) as usize]
     }
 
-    pub(crate) fn accesses(&self, subresource_range: ImageSubresourceRange) -> ImageAccesses<'_> {
-        assert_eq!(subresource_range.aspects, ImageAspects::COLOR);
-
+    pub(crate) fn accesses(&self, array_layers: Range<u32>) -> ImageAccesses<'_> {
+        let subresource_range = ImageSubresourceRange {
+            aspects: ImageAspects::COLOR,
+            mip_levels: 0..1,
+            array_layers,
+        };
         let subresource_ranges =
             SubresourceRanges::new(subresource_range, 1, self.swapchain.image_array_layers());
-        let map = MutexGuard::leak(self.last_accesses.lock());
+        let last_accesses = MutexGuard::leak(self.last_accesses.lock());
 
         ImageAccesses {
             mip_levels: 1,
             array_layers: self.swapchain.image_array_layers(),
             subresource_ranges,
-            overlapping: map.overlapping(0..0),
-            map,
+            overlapping: last_accesses.overlapping(0..0),
+            last_accesses,
             // SAFETY: We locked the mutex above.
             _guard: unsafe { AccessesGuard::new(&self.last_accesses) },
         }
     }
 
-    pub(crate) unsafe fn set_access(
-        &self,
-        subresource_range: ImageSubresourceRange,
-        access: ImageAccess,
-    ) {
-        assert_eq!(subresource_range.aspects, ImageAspects::COLOR);
-
+    pub(crate) unsafe fn set_access(&self, array_layers: Range<u32>, access: ImageAccess) {
+        let subresource_range = ImageSubresourceRange {
+            aspects: ImageAspects::COLOR,
+            mip_levels: 0..1,
+            array_layers,
+        };
         let mut last_accesses = self.last_accesses.lock();
 
         for range in
@@ -964,7 +931,7 @@ pub type BufferRange = Range<DeviceSize>;
 ///
 /// [`accesses`]: BufferState::accesses
 pub struct BufferAccesses<'a> {
-    inner: rangemap::map::Overlapping<'a, DeviceSize, BufferAccess, Range<DeviceSize>>,
+    overlapping: rangemap::map::Overlapping<'a, DeviceSize, BufferAccess, Range<DeviceSize>>,
     _guard: AccessesGuard<'a, BufferAccess>,
 }
 
@@ -973,7 +940,7 @@ impl<'a> Iterator for BufferAccesses<'a> {
 
     #[inline]
     fn next(&mut self) -> Option<Self::Item> {
-        self.inner
+        self.overlapping
             .next()
             .map(|(range, access)| (range.clone(), access))
     }
@@ -991,7 +958,7 @@ pub struct ImageAccesses<'a> {
     array_layers: u32,
     subresource_ranges: SubresourceRanges,
     overlapping: rangemap::map::Overlapping<'a, DeviceSize, ImageAccess, Range<DeviceSize>>,
-    map: &'a RangeMap<DeviceSize, ImageAccess>,
+    last_accesses: &'a RangeMap<DeviceSize, ImageAccess>,
     _guard: AccessesGuard<'a, ImageAccess>,
 }
 
@@ -1000,17 +967,17 @@ impl<'a> Iterator for ImageAccesses<'a> {
 
     #[inline]
     fn next(&mut self) -> Option<Self::Item> {
-        if let Some((range, access)) = self.overlapping.next() {
-            let subresource_range =
-                range_to_subresources(range.clone(), self.mip_levels, self.array_layers);
+        loop {
+            if let Some((range, access)) = self.overlapping.next() {
+                let subresource_range =
+                    range_to_subresources(range.clone(), self.mip_levels, self.array_layers);
 
-            Some((subresource_range, access))
-        } else if let Some(range) = self.subresource_ranges.next() {
-            self.overlapping = self.map.overlapping(range);
-
-            self.next()
-        } else {
-            None
+                break Some((subresource_range, access));
+            } else if let
Some(range) = self.subresource_ranges.next() {
+                self.overlapping = self.last_accesses.overlapping(range);
+            } else {
+                break None;
+            }
+        }
     }
 }
@@ -1080,6 +1047,7 @@ impl SubresourceRanges {
     ) -> Self {
         assert!(subresource_range.mip_levels.end <= image_mip_levels);
         assert!(subresource_range.array_layers.end <= image_array_layers);
+        assert!(!subresource_range.aspects.is_empty());
         assert!(!subresource_range.mip_levels.is_empty());
         assert!(!subresource_range.array_layers.is_empty());
 
@@ -1839,6 +1807,24 @@ access_types! {
     }
 }
 
+impl AccessType {
+    pub(crate) const fn is_valid_buffer_access_type(self) -> bool {
+        // Let's reuse the image layout lookup table, since it already exists.
+        let image_layout = self.image_layout();
+
+        matches!(image_layout, ImageLayout::Undefined) && !matches!(self, AccessType::None)
+    }
+
+    pub(crate) const fn is_valid_image_access_type(self) -> bool {
+        let image_layout = self.image_layout();
+
+        !matches!(
+            image_layout,
+            ImageLayout::Undefined | ImageLayout::PresentSrc,
+        )
+    }
+}
+
 /// Specifies which type of layout an image subresource is accessed in.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
 #[non_exhaustive]
 pub enum ImageLayoutType {
diff --git a/vulkano/src/sync/mod.rs b/vulkano/src/sync/mod.rs
index 8cb495eb..3c9b372b 100644
--- a/vulkano/src/sync/mod.rs
+++ b/vulkano/src/sync/mod.rs
@@ -74,6 +74,23 @@ where
     Concurrent(I),
 }
 
+impl<I> Sharing<I>
+where
+    I: IntoIterator<Item = u32>,
+{
+    /// Returns `true` if `self` is the `Exclusive` variant.
+    #[inline]
+    pub fn is_exclusive(&self) -> bool {
+        matches!(self, Self::Exclusive)
+    }
+
+    /// Returns `true` if `self` is the `Concurrent` variant.
+    #[inline]
+    pub fn is_concurrent(&self) -> bool {
+        matches!(self, Self::Concurrent(..))
+    }
+}
+
 /// How the memory of a resource is currently being accessed.
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub(crate) enum CurrentAccess {