Replace Iterator parameters with IntoIterator where possible, replace various iterator types with impl Iterator (#1719)

Rua 2021-09-29 08:15:39 +02:00 committed by GitHub
parent 131f5b0f0f
commit 17820738d8
19 changed files with 178 additions and 330 deletions
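The recurring pattern throughout this diff: functions that previously demanded a ready-made `I: Iterator` now accept `I: IntoIterator` and call `.into_iter()` once inside the body, so callers can pass collections directly. A minimal standalone sketch of the before/after (illustrative only, not code from this commit):

// Before: every call site had to write `sum_ids(my_vec.into_iter())`.
fn sum_ids_old<I: Iterator<Item = u32>>(ids: I) -> u32 {
    ids.sum()
}

// After: a Vec, an array, a range, or any iterator works directly.
fn sum_ids_new<I: IntoIterator<Item = u32>>(ids: I) -> u32 {
    ids.into_iter().sum()
}

fn main() {
    assert_eq!(sum_ids_old(vec![1, 2, 3].into_iter()), 6);
    assert_eq!(sum_ids_new(vec![1, 2, 3]), 6); // no `.into_iter()` at the call site
    assert_eq!(sum_ids_new(1..=3), 6); // every Iterator is also an IntoIterator
}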

View File

@@ -216,7 +216,7 @@ pub(super) fn reflect<'a, I>(
types_registry: &'a mut HashMap<String, RegisteredType>,
) -> Result<(TokenStream, TokenStream), Error>
where
I: Iterator<Item = &'a str>,
I: IntoIterator<Item = &'a str>,
{
let struct_name = Ident::new(&format!("{}Shader", prefix), Span::call_site());
let spirv = Spirv::new(words)?;
@@ -325,7 +325,7 @@ where
entry_points_inside_impl.push(entry_point);
}
let include_bytes = input_paths.map(|s| {
let include_bytes = input_paths.into_iter().map(|s| {
quote! {
// using include_bytes here ensures that changing the shader will force recompilation.
// The bytes themselves can be optimized out by the compiler as they are unused.

View File

@@ -157,9 +157,12 @@ impl<T> CpuAccessibleBuffer<[T]> {
data: I,
) -> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryAllocError>
where
I: ExactSizeIterator<Item = T>,
I: IntoIterator<Item = T>,
I::IntoIter: ExactSizeIterator,
T: Content + 'static,
{
let data = data.into_iter();
unsafe {
let uninitialized = CpuAccessibleBuffer::uninitialized_array(
device,
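
The new `I::IntoIter: ExactSizeIterator` bound preserves the old requirement that the element count be known up front, while still accepting any collection. A standalone sketch of how the bound works (hypothetical function, not the vulkano API):

// The length is needed before consuming the data, so the bound moves from
// `I` itself to the iterator that `I` converts into.
fn with_known_len<I>(data: I) -> usize
where
    I: IntoIterator,
    I::IntoIter: ExactSizeIterator,
{
    let data = data.into_iter();
    data.len() // available thanks to the ExactSizeIterator bound
}

fn main() {
    // A Vec qualifies: `std::vec::IntoIter` implements ExactSizeIterator.
    assert_eq!(with_known_len(vec![10, 20, 30]), 3);
    // A `filter` adapter would be rejected at compile time, since its
    // length cannot be known without consuming it.
}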

View File

@@ -420,11 +420,14 @@ where
fn try_next_impl<I>(
&self,
cur_buf_mutex: &mut MutexGuard<Option<Arc<ActualBuffer<A>>>>,
mut data: I,
) -> Result<CpuBufferPoolChunk<T, A>, I>
data: I,
) -> Result<CpuBufferPoolChunk<T, A>, I::IntoIter>
where
I: ExactSizeIterator<Item = T>,
I: IntoIterator<Item = T>,
I::IntoIter: ExactSizeIterator,
{
let mut data = data.into_iter();
// Grab the current buffer. Return `Err` if the pool wasn't "initialized" yet.
let current_buffer = match cur_buf_mutex.clone() {
Some(b) => b,
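
Note the error type change from `I` to `I::IntoIter`: on failure the caller now gets back the already-converted iterator, unconsumed, and can retry with it. A standalone sketch of that contract (hypothetical `try_store`, not the vulkano API):

fn try_store<I>(capacity: usize, data: I) -> Result<Vec<u32>, I::IntoIter>
where
    I: IntoIterator<Item = u32>,
    I::IntoIter: ExactSizeIterator,
{
    let data = data.into_iter();
    if data.len() > capacity {
        return Err(data); // hand the iterator back, nothing consumed
    }
    Ok(data.collect())
}

fn main() {
    let leftover = try_store(2, vec![1, 2, 3]).unwrap_err();
    // Retry with more capacity, reusing the returned iterator.
    assert_eq!(try_store(4, leftover).unwrap(), vec![1, 2, 3]);
}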

View File

@@ -193,7 +193,8 @@ impl<T> ImmutableBuffer<[T]> {
queue: Arc<Queue>,
) -> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
where
D: ExactSizeIterator<Item = T>,
D: IntoIterator<Item = T>,
D::IntoIter: ExactSizeIterator,
T: 'static + Send + Sync + Sized,
{
let source = CpuAccessibleBuffer::from_iter(

View File

@@ -72,7 +72,7 @@ impl UnsafeBuffer {
sparse: Option<SparseLevel>,
) -> Result<(UnsafeBuffer, MemoryRequirements), BufferCreationError>
where
I: Iterator<Item = u32>,
I: IntoIterator<Item = u32>,
{
let fns = device.fns();
@@ -126,7 +126,9 @@ impl UnsafeBuffer {
Sharing::Exclusive => {
(ash::vk::SharingMode::EXCLUSIVE, SmallVec::<[u32; 8]>::new())
}
Sharing::Concurrent(ids) => (ash::vk::SharingMode::CONCURRENT, ids.collect()),
Sharing::Concurrent(ids) => {
(ash::vk::SharingMode::CONCURRENT, ids.into_iter().collect())
}
};
let infos = ash::vk::BufferCreateInfo {
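
Since `Sharing` is generic over the id source (see the `Sharing` definition at the end of this diff), relaxing its bound to `IntoIterator` lets callers build it from a plain `Vec` or array; the conversion happens only in the `Concurrent` arm. A simplified standalone sketch with stand-in types:

enum Sharing<I: IntoIterator<Item = u32>> {
    Exclusive,
    Concurrent(I),
}

fn sharing_mode<I: IntoIterator<Item = u32>>(sharing: Sharing<I>) -> (bool, Vec<u32>) {
    match sharing {
        Sharing::Exclusive => (false, Vec::new()),
        Sharing::Concurrent(ids) => (true, ids.into_iter().collect()),
    }
}

fn main() {
    // Queue family ids can now be passed as a plain Vec.
    let (concurrent, ids) = sharing_mode(Sharing::Concurrent(vec![0, 1]));
    assert!(concurrent && ids == [0, 1]);
    // `Exclusive` needs a type annotation because `I` is otherwise unconstrained.
    let (concurrent, _) = sharing_mode(Sharing::<Vec<u32>>::Exclusive);
    assert!(!concurrent);
}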

View File

@@ -16,16 +16,13 @@
//! trait. By default vulkano will use the `StandardCommandPool` struct, but you can implement
//! this trait yourself by wrapping around the `UnsafeCommandPool` type.
use crate::device::physical::QueueFamily;
use crate::device::DeviceOwned;
use crate::OomError;
pub use self::standard::StandardCommandPool;
pub use self::sys::CommandPoolTrimError;
pub use self::sys::UnsafeCommandPool;
pub use self::sys::UnsafeCommandPoolAlloc;
pub use self::sys::UnsafeCommandPoolAllocIter;
use crate::device::physical::QueueFamily;
use crate::device::DeviceOwned;
use crate::OomError;
pub mod standard;
mod sys;

View File

@@ -22,7 +22,6 @@ use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
/// Low-level implementation of a command pool.
///
@@ -182,41 +181,43 @@ impl UnsafeCommandPool {
&self,
secondary: bool,
count: u32,
) -> Result<UnsafeCommandPoolAllocIter, OomError> {
if count == 0 {
return Ok(UnsafeCommandPoolAllocIter {
device: self.device.clone(),
list: vec![].into_iter(),
});
}
) -> Result<impl ExactSizeIterator<Item = UnsafeCommandPoolAlloc>, OomError> {
let out = if count == 0 {
vec![]
} else {
let infos = ash::vk::CommandBufferAllocateInfo {
command_pool: self.pool,
level: if secondary {
ash::vk::CommandBufferLevel::SECONDARY
} else {
ash::vk::CommandBufferLevel::PRIMARY
},
command_buffer_count: count,
..Default::default()
};
let infos = ash::vk::CommandBufferAllocateInfo {
command_pool: self.pool,
level: if secondary {
ash::vk::CommandBufferLevel::SECONDARY
} else {
ash::vk::CommandBufferLevel::PRIMARY
},
command_buffer_count: count,
..Default::default()
unsafe {
let fns = self.device.fns();
let mut out = Vec::with_capacity(count as usize);
check_errors(fns.v1_0.allocate_command_buffers(
self.device.internal_object(),
&infos,
out.as_mut_ptr(),
))?;
out.set_len(count as usize);
out
}
};
unsafe {
let fns = self.device.fns();
let mut out = Vec::with_capacity(count as usize);
check_errors(fns.v1_0.allocate_command_buffers(
self.device.internal_object(),
&infos,
out.as_mut_ptr(),
))?;
let device = self.device.clone();
out.set_len(count as usize);
Ok(UnsafeCommandPoolAllocIter {
device: self.device.clone(),
list: out.into_iter(),
})
}
Ok(out
.into_iter()
.map(move |command_buffer| UnsafeCommandPoolAlloc {
command_buffer,
device: device.clone(),
}))
}
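
This makes the hand-written `UnsafeCommandPoolAllocIter` (removed below) unnecessary: `map` over a `Vec`'s iterator already implements `ExactSizeIterator`, and `impl Trait` hides the concrete type. A standalone sketch of the same shape, with stand-in types for `Device` and the raw handles:

use std::sync::Arc;

struct Alloc {
    id: u64,             // stand-in for ash::vk::CommandBuffer
    device: Arc<String>, // stand-in for Arc<Device>
}

fn alloc_all(device: &Arc<String>, raw: Vec<u64>) -> impl ExactSizeIterator<Item = Alloc> {
    // Clone once, then move the clone into the closure so the returned
    // iterator owns everything and borrows nothing.
    let device = device.clone();
    raw.into_iter().map(move |id| Alloc {
        id,
        device: device.clone(),
    })
}

fn main() {
    let device = Arc::new("device".to_string());
    let mut allocs = alloc_all(&device, vec![1, 2, 3]);
    assert_eq!(allocs.len(), 3); // exact length survives the `map`
    let first = allocs.next().unwrap();
    assert_eq!((first.id, first.device.as_str()), (1, "device"));
}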
/// Frees individual command buffers.
@@ -227,10 +228,12 @@ impl UnsafeCommandPool {
///
pub unsafe fn free_command_buffers<I>(&self, command_buffers: I)
where
I: Iterator<Item = UnsafeCommandPoolAlloc>,
I: IntoIterator<Item = UnsafeCommandPoolAlloc>,
{
let command_buffers: SmallVec<[_; 4]> =
command_buffers.map(|cb| cb.command_buffer).collect();
let command_buffers: SmallVec<[_; 4]> = command_buffers
.into_iter()
.map(|cb| cb.command_buffer)
.collect();
let fns = self.device.fns();
fns.v1_0.free_command_buffers(
self.device.internal_object(),
@@ -299,34 +302,6 @@ unsafe impl VulkanObject for UnsafeCommandPoolAlloc {
}
}
/// Iterator for newly-allocated command buffers.
#[derive(Debug)]
pub struct UnsafeCommandPoolAllocIter {
device: Arc<Device>,
list: VecIntoIter<ash::vk::CommandBuffer>,
}
impl Iterator for UnsafeCommandPoolAllocIter {
type Item = UnsafeCommandPoolAlloc;
#[inline]
fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
self.list
.next()
.map(|command_buffer| UnsafeCommandPoolAlloc {
command_buffer,
device: self.device.clone(),
})
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.list.size_hint()
}
}
impl ExactSizeIterator for UnsafeCommandPoolAllocIter {}
/// Error that can happen when trimming command pools.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CommandPoolTrimError {

View File

@@ -385,7 +385,9 @@ impl DescriptorSetBuilder {
));
}
if !image_view.can_be_sampled(&immutable_samplers[descriptor.array_element as usize]) {
if !image_view
.can_be_sampled(&immutable_samplers[descriptor.array_element as usize])
{
return Err(DescriptorSetError::IncompatibleImageViewSampler);
}

View File

@@ -12,7 +12,6 @@
pub use self::standard::StdDescriptorPool;
pub use self::sys::DescriptorPoolAllocError;
pub use self::sys::UnsafeDescriptorPool;
pub use self::sys::UnsafeDescriptorPoolAllocIter;
use crate::descriptor_set::layout::DescriptorSetLayout;
use crate::descriptor_set::layout::DescriptorType;
use crate::descriptor_set::UnsafeDescriptorSet;

View File

@@ -21,7 +21,6 @@ use std::fmt;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
/// Pool from which descriptor sets are allocated from.
///
@@ -153,12 +152,12 @@ impl UnsafeDescriptorPool {
pub unsafe fn alloc<'l, I>(
&mut self,
layouts: I,
) -> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError>
) -> Result<impl ExactSizeIterator<Item = UnsafeDescriptorSet>, DescriptorPoolAllocError>
where
I: IntoIterator<Item = &'l DescriptorSetLayout>,
{
let mut variable_descriptor_counts: SmallVec<[_; 8]> = SmallVec::new();
let layouts: SmallVec<[_; 8]> = layouts
.into_iter()
.map(|l| {
@@ -182,70 +181,68 @@ impl UnsafeDescriptorPool {
&mut self,
layouts: &SmallVec<[ash::vk::DescriptorSetLayout; 8]>,
variable_descriptor_counts: &SmallVec<[u32; 8]>,
) -> Result<UnsafeDescriptorPoolAllocIter, DescriptorPoolAllocError> {
) -> Result<impl ExactSizeIterator<Item = UnsafeDescriptorSet>, DescriptorPoolAllocError> {
let num = layouts.len();
if num == 0 {
return Ok(UnsafeDescriptorPoolAllocIter {
sets: vec![].into_iter(),
});
}
let variable_desc_count_alloc_info = if variable_descriptor_counts.iter().any(|c| *c != 0) {
Some(ash::vk::DescriptorSetVariableDescriptorCountAllocateInfo {
descriptor_set_count: layouts.len() as u32,
p_descriptor_counts: variable_descriptor_counts.as_ptr(),
.. Default::default()
})
let output = if num == 0 {
vec![]
} else {
None
let variable_desc_count_alloc_info =
if variable_descriptor_counts.iter().any(|c| *c != 0) {
Some(ash::vk::DescriptorSetVariableDescriptorCountAllocateInfo {
descriptor_set_count: layouts.len() as u32,
p_descriptor_counts: variable_descriptor_counts.as_ptr(),
..Default::default()
})
} else {
None
};
let infos = ash::vk::DescriptorSetAllocateInfo {
descriptor_pool: self.pool,
descriptor_set_count: layouts.len() as u32,
p_set_layouts: layouts.as_ptr(),
p_next: if let Some(next) = variable_desc_count_alloc_info.as_ref() {
next as *const _ as *const _
} else {
ptr::null()
},
..Default::default()
};
let mut output = Vec::with_capacity(num);
let fns = self.device.fns();
let ret = fns.v1_0.allocate_descriptor_sets(
self.device.internal_object(),
&infos,
output.as_mut_ptr(),
);
// According to the specs, because `VK_ERROR_FRAGMENTED_POOL` was added after version
// 1.0 of Vulkan, any negative return value except out-of-memory errors must be
// considered as a fragmented pool error.
match ret {
ash::vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
return Err(DescriptorPoolAllocError::OutOfHostMemory);
}
ash::vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
return Err(DescriptorPoolAllocError::OutOfDeviceMemory);
}
ash::vk::Result::ERROR_OUT_OF_POOL_MEMORY_KHR => {
return Err(DescriptorPoolAllocError::OutOfPoolMemory);
}
c if c.as_raw() < 0 => {
return Err(DescriptorPoolAllocError::FragmentedPool);
}
_ => (),
};
output.set_len(num);
output
};
let infos = ash::vk::DescriptorSetAllocateInfo {
descriptor_pool: self.pool,
descriptor_set_count: layouts.len() as u32,
p_set_layouts: layouts.as_ptr(),
p_next: if let Some(next) = variable_desc_count_alloc_info.as_ref() {
next as *const _ as *const _
} else {
ptr::null()
},
..Default::default()
};
let mut output = Vec::with_capacity(num);
let fns = self.device.fns();
let ret = fns.v1_0.allocate_descriptor_sets(
self.device.internal_object(),
&infos,
output.as_mut_ptr(),
);
// According to the specs, because `VK_ERROR_FRAGMENTED_POOL` was added after version
// 1.0 of Vulkan, any negative return value except out-of-memory errors must be
// considered as a fragmented pool error.
match ret {
ash::vk::Result::ERROR_OUT_OF_HOST_MEMORY => {
return Err(DescriptorPoolAllocError::OutOfHostMemory);
}
ash::vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
return Err(DescriptorPoolAllocError::OutOfDeviceMemory);
}
ash::vk::Result::ERROR_OUT_OF_POOL_MEMORY_KHR => {
return Err(DescriptorPoolAllocError::OutOfPoolMemory);
}
c if c.as_raw() < 0 => {
return Err(DescriptorPoolAllocError::FragmentedPool);
}
_ => (),
};
output.set_len(num);
Ok(UnsafeDescriptorPoolAllocIter {
sets: output.into_iter(),
})
Ok(output.into_iter().map(|s| UnsafeDescriptorSet { set: s }))
}
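
The `num == 0` early return had to go: with `impl Trait`, every return path must produce the same concrete iterator type, so the empty and success paths now funnel through one `Vec`. A standalone sketch of that constraint:

fn alloc(n: usize) -> impl ExactSizeIterator<Item = u32> {
    let out: Vec<u32> = if n == 0 {
        vec![] // must be the same concrete type as the success path
    } else {
        (0..n as u32).collect() // stand-in for the real allocation call
    };
    out.into_iter()
}

fn main() {
    assert_eq!(alloc(0).len(), 0);
    assert_eq!(alloc(3).collect::<Vec<_>>(), vec![0, 1, 2]);
}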
/// Frees some descriptor sets.
@@ -369,28 +366,6 @@ impl fmt::Display for DescriptorPoolAllocError {
}
}
/// Iterator to the descriptor sets allocated from an unsafe descriptor pool.
#[derive(Debug)]
pub struct UnsafeDescriptorPoolAllocIter {
sets: VecIntoIter<ash::vk::DescriptorSet>,
}
impl Iterator for UnsafeDescriptorPoolAllocIter {
type Item = UnsafeDescriptorSet;
#[inline]
fn next(&mut self) -> Option<UnsafeDescriptorSet> {
self.sets.next().map(|s| UnsafeDescriptorSet { set: s })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.sets.size_hint()
}
}
impl ExactSizeIterator for UnsafeDescriptorPoolAllocIter {}
#[cfg(test)]
mod tests {
use crate::descriptor_set::layout::DescriptorDesc;

View File

@@ -56,7 +56,7 @@ impl UnsafeDescriptorSet {
///
pub unsafe fn write<I>(&mut self, device: &Device, writes: I)
where
I: Iterator<Item = DescriptorWrite>,
I: IntoIterator<Item = DescriptorWrite>,
{
let fns = device.fns();

View File

@@ -191,8 +191,12 @@ impl Device {
///
/// - Panics if one of the queue families doesn't belong to the given device.
///
// TODO: return Arc<Queue> and handle synchronization in the Queue
// TODO: should take the PhysicalDevice by value
// TODO: Eliminate QueuesIter in favour of `impl ExactSizeIterator`. This doesn't currently work
// due to this Rust bug: https://github.com/rust-lang/rust/issues/42940. The compiler will
// erroneously assume that the returned iterator borrows from 'a, which breaks compilation.
pub fn new<'a, I>(
physical_device: PhysicalDevice,
requested_features: &Features,
@@ -447,15 +451,11 @@ impl Device {
/// > **Note**: Will return `-> impl ExactSizeIterator<Item = QueueFamily>` in the future.
// TODO: ^
#[inline]
pub fn active_queue_families<'a>(
&'a self,
) -> Box<dyn ExactSizeIterator<Item = QueueFamily<'a>> + 'a> {
pub fn active_queue_families<'a>(&'a self) -> impl ExactSizeIterator<Item = QueueFamily<'a>> {
let physical_device = self.physical_device();
Box::new(
self.active_queue_families
.iter()
.map(move |&id| physical_device.queue_family_by_id(id).unwrap()),
)
self.active_queue_families
.iter()
.map(move |&id| physical_device.queue_family_by_id(id).unwrap())
}
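
Unlike `Device::new` above, `active_queue_families` can drop its boxing: an iterator that borrows `&self` works fine with `impl Trait` in return position, saving an allocation and a layer of dynamic dispatch. A standalone sketch with stand-in types:

struct Device {
    active_ids: Vec<u32>,
}

impl Device {
    // Before: Box<dyn ExactSizeIterator<Item = u32> + '_> — heap-allocated
    // and dynamically dispatched. After: plain `impl Trait` borrowing self.
    fn active_ids(&self) -> impl ExactSizeIterator<Item = u32> + '_ {
        self.active_ids.iter().map(|&id| id * 10) // stand-in for the lookup
    }
}

fn main() {
    let device = Device { active_ids: vec![1, 2] };
    assert_eq!(device.active_ids().collect::<Vec<_>>(), vec![10, 20]);
}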
/// Returns the features that have been enabled on the device.

View File

@@ -331,7 +331,8 @@ impl ImmutableImage {
>
where
Px: Pixel + Send + Sync + Clone + 'static,
I: ExactSizeIterator<Item = Px>,
I: IntoIterator<Item = Px>,
I::IntoIter: ExactSizeIterator,
{
let source = CpuAccessibleBuffer::from_iter(
queue.device().clone(),

View File

@@ -101,11 +101,13 @@
) -> Result<(UnsafeImage, MemoryRequirements), ImageCreationError>
where
Mi: Into<MipmapsCount>,
I: Iterator<Item = u32>,
I: IntoIterator<Item = u32>,
{
let sharing = match sharing {
Sharing::Exclusive => (ash::vk::SharingMode::EXCLUSIVE, SmallVec::<[u32; 8]>::new()),
Sharing::Concurrent(ids) => (ash::vk::SharingMode::CONCURRENT, ids.collect()),
Sharing::Concurrent(ids) => {
(ash::vk::SharingMode::CONCURRENT, ids.into_iter().collect())
}
};
UnsafeImage::new_impl(

View File

@@ -7,18 +7,16 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::error;
use std::ffi::CStr;
use std::fmt;
use std::ptr;
use std::vec::IntoIter;
use crate::check_errors;
use crate::instance::loader;
use crate::instance::loader::LoadingError;
use crate::Error;
use crate::OomError;
use crate::Version;
use std::error;
use std::ffi::CStr;
use std::fmt;
use std::ptr;
/// Queries the list of layers that are available when creating an instance.
///
@@ -43,14 +41,14 @@ use crate::Version;
/// println!("Available layer: {}", layer.name());
/// }
/// ```
pub fn layers_list() -> Result<LayersIterator, LayersListError> {
pub fn layers_list() -> Result<impl ExactSizeIterator<Item = LayerProperties>, LayersListError> {
layers_list_from_loader(loader::auto_loader()?)
}
/// Same as `layers_list()`, but allows specifying a loader.
pub fn layers_list_from_loader<L>(
ptrs: &loader::FunctionPointers<L>,
) -> Result<LayersIterator, LayersListError>
) -> Result<impl ExactSizeIterator<Item = LayerProperties>, LayersListError>
where
L: loader::Loader,
{
@@ -70,9 +68,7 @@ where
})?;
layers.set_len(num as usize);
Ok(LayersIterator {
iter: layers.into_iter(),
})
Ok(layers.into_iter().map(|p| LayerProperties { props: p }))
}
}
@@ -224,28 +220,6 @@ impl From<Error> for LayersListError {
}
}
/// Iterator that produces the list of layers that are available.
// TODO: #[derive(Debug, Clone)]
pub struct LayersIterator {
iter: IntoIter<ash::vk::LayerProperties>,
}
impl Iterator for LayersIterator {
type Item = LayerProperties;
#[inline]
fn next(&mut self) -> Option<LayerProperties> {
self.iter.next().map(|p| LayerProperties { props: p })
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
}
impl ExactSizeIterator for LayersIterator {}
#[cfg(test)]
mod tests {
use crate::instance;

View File

@ -57,7 +57,6 @@ pub use self::instance::Instance;
pub use self::instance::InstanceCreationError;
pub use self::layers::layers_list;
pub use self::layers::LayerProperties;
pub use self::layers::LayersIterator;
pub use self::layers::LayersListError;
pub use self::loader::LoadingError;
pub use crate::extensions::{

View File

@@ -156,37 +156,15 @@ impl SupportedPresentModes {
/// Returns an iterator over the list of supported present modes.
#[inline]
pub fn iter(&self) -> SupportedPresentModesIter {
SupportedPresentModesIter(self.clone())
}
}
/// Enumeration of the `PresentMode`s that are supported.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct SupportedPresentModesIter(SupportedPresentModes);
impl Iterator for SupportedPresentModesIter {
type Item = PresentMode;
#[inline]
fn next(&mut self) -> Option<PresentMode> {
if self.0.immediate {
self.0.immediate = false;
return Some(PresentMode::Immediate);
}
if self.0.mailbox {
self.0.mailbox = false;
return Some(PresentMode::Mailbox);
}
if self.0.fifo {
self.0.fifo = false;
return Some(PresentMode::Fifo);
}
if self.0.relaxed {
self.0.relaxed = false;
return Some(PresentMode::Relaxed);
}
None
pub fn iter(&self) -> impl Iterator<Item = PresentMode> {
let moved = *self;
std::array::IntoIter::new([
PresentMode::Immediate,
PresentMode::Mailbox,
PresentMode::Fifo,
PresentMode::Relaxed,
])
.filter(move |&mode| moved.supports(mode))
}
}
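
The same rewrite is applied to `SupportedCompositeAlpha` and `SupportedSurfaceTransforms` below: iterate a fixed array of every variant and filter by the copied flags, instead of hand-writing the state machine. `std::array::IntoIter::new` is needed because, before the 2021 edition, calling `.into_iter()` on an array yielded references. A standalone sketch of the pattern:

#[derive(Copy, Clone)]
struct Supported {
    immediate: bool,
    mailbox: bool,
}

#[derive(Copy, Clone, Debug, PartialEq)]
enum Mode {
    Immediate,
    Mailbox,
}

impl Supported {
    fn supports(&self, mode: Mode) -> bool {
        match mode {
            Mode::Immediate => self.immediate,
            Mode::Mailbox => self.mailbox,
        }
    }

    fn iter(&self) -> impl Iterator<Item = Mode> {
        // Copy self so the returned iterator owns its data (no lifetime tie).
        let moved = *self;
        std::array::IntoIter::new([Mode::Immediate, Mode::Mailbox])
            .filter(move |&mode| moved.supports(mode))
    }
}

fn main() {
    let s = Supported { immediate: false, mailbox: true };
    assert_eq!(s.iter().collect::<Vec<_>>(), vec![Mode::Mailbox]);
}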
@@ -308,37 +286,15 @@ impl SupportedCompositeAlpha {
/// Returns an iterator over the list of supported composite alpha modes.
#[inline]
pub fn iter(&self) -> SupportedCompositeAlphaIter {
SupportedCompositeAlphaIter(self.clone())
}
}
/// Enumeration of the `CompositeAlpha` that are supported.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct SupportedCompositeAlphaIter(SupportedCompositeAlpha);
impl Iterator for SupportedCompositeAlphaIter {
type Item = CompositeAlpha;
#[inline]
fn next(&mut self) -> Option<CompositeAlpha> {
if self.0.opaque {
self.0.opaque = false;
return Some(CompositeAlpha::Opaque);
}
if self.0.pre_multiplied {
self.0.pre_multiplied = false;
return Some(CompositeAlpha::PreMultiplied);
}
if self.0.post_multiplied {
self.0.post_multiplied = false;
return Some(CompositeAlpha::PostMultiplied);
}
if self.0.inherit {
self.0.inherit = false;
return Some(CompositeAlpha::Inherit);
}
None
pub fn iter(&self) -> impl Iterator<Item = CompositeAlpha> {
let moved = *self;
std::array::IntoIter::new([
CompositeAlpha::Opaque,
CompositeAlpha::PreMultiplied,
CompositeAlpha::PostMultiplied,
CompositeAlpha::Inherit,
])
.filter(move |&mode| moved.supports(mode))
}
}
@@ -460,57 +416,20 @@ impl SupportedSurfaceTransforms {
/// Returns an iterator over the list of supported surface transforms.
#[inline]
pub fn iter(&self) -> SupportedSurfaceTransformsIter {
SupportedSurfaceTransformsIter(self.clone())
}
}
/// Enumeration of the `SurfaceTransform` that are supported.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct SupportedSurfaceTransformsIter(SupportedSurfaceTransforms);
impl Iterator for SupportedSurfaceTransformsIter {
type Item = SurfaceTransform;
#[inline]
fn next(&mut self) -> Option<SurfaceTransform> {
if self.0.identity {
self.0.identity = false;
return Some(SurfaceTransform::Identity);
}
if self.0.rotate90 {
self.0.rotate90 = false;
return Some(SurfaceTransform::Rotate90);
}
if self.0.rotate180 {
self.0.rotate180 = false;
return Some(SurfaceTransform::Rotate180);
}
if self.0.rotate270 {
self.0.rotate270 = false;
return Some(SurfaceTransform::Rotate270);
}
if self.0.horizontal_mirror {
self.0.horizontal_mirror = false;
return Some(SurfaceTransform::HorizontalMirror);
}
if self.0.horizontal_mirror_rotate90 {
self.0.horizontal_mirror_rotate90 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate90);
}
if self.0.horizontal_mirror_rotate180 {
self.0.horizontal_mirror_rotate180 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate180);
}
if self.0.horizontal_mirror_rotate270 {
self.0.horizontal_mirror_rotate270 = false;
return Some(SurfaceTransform::HorizontalMirrorRotate270);
}
if self.0.inherit {
self.0.inherit = false;
return Some(SurfaceTransform::Inherit);
}
None
pub fn iter(&self) -> impl Iterator<Item = SurfaceTransform> {
let moved = *self;
std::array::IntoIter::new([
SurfaceTransform::Identity,
SurfaceTransform::Rotate90,
SurfaceTransform::Rotate180,
SurfaceTransform::Rotate270,
SurfaceTransform::HorizontalMirror,
SurfaceTransform::HorizontalMirrorRotate90,
SurfaceTransform::HorizontalMirrorRotate180,
SurfaceTransform::HorizontalMirrorRotate270,
SurfaceTransform::Inherit,
])
.filter(move |&mode| moved.supports(mode))
}
}

View File

@@ -296,18 +296,13 @@
//! ```
//!
use std::sync::atomic::AtomicBool;
pub use self::capabilities::Capabilities;
pub use self::capabilities::ColorSpace;
pub use self::capabilities::CompositeAlpha;
pub use self::capabilities::PresentMode;
pub use self::capabilities::SupportedCompositeAlpha;
pub use self::capabilities::SupportedCompositeAlphaIter;
pub use self::capabilities::SupportedPresentModes;
pub use self::capabilities::SupportedPresentModesIter;
pub use self::capabilities::SupportedSurfaceTransforms;
pub use self::capabilities::SupportedSurfaceTransformsIter;
pub use self::capabilities::SurfaceTransform;
pub use self::present_region::PresentRegion;
pub use self::present_region::RectangleLayer;
@@ -327,6 +322,7 @@ pub use self::swapchain::Swapchain;
pub use self::swapchain::SwapchainAcquireFuture;
pub use self::swapchain::SwapchainBuilder;
pub use self::swapchain::SwapchainCreationError;
use std::sync::atomic::AtomicBool;
mod capabilities;
pub mod display;

View File

@@ -164,7 +164,7 @@ impl<'a> From<&'a [&'a Arc<Queue>]> for SharingMode {
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Sharing<I>
where
I: Iterator<Item = u32>,
I: IntoIterator<Item = u32>,
{
/// The resource is used by only one queue family.
Exclusive,