Preemptively rename stuff, mark enums as non-exhaustive (#2172)

marc0246 2023-04-01 18:51:58 +02:00 committed by GitHub
parent 932d5143a3
commit ad530ed48a
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 45 additions and 33 deletions

View File

@@ -370,7 +370,7 @@ fn main() {
| BufferUsage::TRANSFER_DST
| BufferUsage::VERTEX_BUFFER,
// Specify this buffer will only be used by the device.
-memory_usage: MemoryUsage::GpuOnly,
+memory_usage: MemoryUsage::DeviceOnly,
..Default::default()
},
PARTICLE_COUNT as vulkano::DeviceSize,

View File

@@ -51,7 +51,7 @@
//! When allocating memory for a buffer, you have to specify a *memory usage*. This tells the
//! memory allocator what memory type it should pick for the allocation.
//!
-//! - [`MemoryUsage::GpuOnly`] will allocate a buffer that's usually located in device-local
+//! - [`MemoryUsage::DeviceOnly`] will allocate a buffer that's usually located in device-local
//! memory and whose content can't be directly accessed by your application. Accessing this
//! buffer from the device is generally faster compared to accessing a buffer that's located in
//! host-visible memory.
@@ -195,7 +195,7 @@ pub mod view;
/// // Specify use as a storage buffer and transfer destination.
/// buffer_usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST,
/// // Specify use by the device only.
-/// memory_usage: MemoryUsage::GpuOnly,
+/// memory_usage: MemoryUsage::DeviceOnly,
/// ..Default::default()
/// },
/// 10_000 as DeviceSize,
@@ -565,7 +565,7 @@ pub struct BufferAllocateInfo {
/// The memory usage to use for the allocation.
///
-/// If this is set to [`MemoryUsage::GpuOnly`], then the buffer may need to be initialized
+/// If this is set to [`MemoryUsage::DeviceOnly`], then the buffer may need to be initialized
/// using a staging buffer. The exception is some integrated GPUs and laptop GPUs, which do not
/// have memory types that are not host-visible. With [`MemoryUsage::Upload`] and
/// [`MemoryUsage::Download`], a staging buffer is never needed.
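
As a quick illustration of the renamed variant in context, here is a minimal sketch (not part of this commit) of filling out a `BufferAllocateInfo` for a device-only buffer. The module paths (`vulkano::buffer`, `vulkano::memory::allocator`) and the wrapping helper function are assumptions; the fields and the `..Default::default()` pattern are exactly as in the hunks above, and the resulting value would then be passed, together with a memory allocator and a size, to one of the `Buffer` constructors as in the doc example.

// Sketch only: module paths are assumed; `device_only_allocate_info` is a
// hypothetical helper, not vulkano API.
use vulkano::buffer::{BufferAllocateInfo, BufferUsage};
use vulkano::memory::allocator::MemoryUsage;

fn device_only_allocate_info() -> BufferAllocateInfo {
    BufferAllocateInfo {
        // Written once via a transfer, then read by shaders as a storage buffer.
        buffer_usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST,
        // Formerly `MemoryUsage::GpuOnly`: prefers DEVICE_LOCAL, non-HOST_VISIBLE memory.
        memory_usage: MemoryUsage::DeviceOnly,
        ..Default::default()
    }
}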

View File

@@ -1277,7 +1277,7 @@ mod tests {
..requirements
},
allocation_type: AllocationType::Linear,
-usage: MemoryUsage::GpuOnly,
+usage: MemoryUsage::DeviceOnly,
..Default::default()
})
.unwrap();
@@ -1286,7 +1286,7 @@ mod tests {
.allocate(AllocationCreateInfo {
requirements,
allocation_type: AllocationType::Linear,
-usage: MemoryUsage::GpuOnly,
+usage: MemoryUsage::DeviceOnly,
..Default::default()
})
.unwrap();

View File

@@ -443,7 +443,7 @@ mod tests {
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::UNIFORM_TEXEL_BUFFER,
-memory_usage: MemoryUsage::GpuOnly,
+memory_usage: MemoryUsage::DeviceOnly,
..Default::default()
},
128,
@@ -469,7 +469,7 @@ mod tests {
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::STORAGE_TEXEL_BUFFER,
-memory_usage: MemoryUsage::GpuOnly,
+memory_usage: MemoryUsage::DeviceOnly,
..Default::default()
},
128,
@@ -495,7 +495,7 @@ mod tests {
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::STORAGE_TEXEL_BUFFER,
-memory_usage: MemoryUsage::GpuOnly,
+memory_usage: MemoryUsage::DeviceOnly,
..Default::default()
},
128,
@@ -521,7 +521,7 @@ mod tests {
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::TRANSFER_DST, // Dummy value
-memory_usage: MemoryUsage::GpuOnly,
+memory_usage: MemoryUsage::DeviceOnly,
..Default::default()
},
128,
@@ -549,7 +549,7 @@ mod tests {
&memory_allocator,
BufferAllocateInfo {
buffer_usage: BufferUsage::UNIFORM_TEXEL_BUFFER | BufferUsage::STORAGE_TEXEL_BUFFER,
-memory_usage: MemoryUsage::GpuOnly,
+memory_usage: MemoryUsage::DeviceOnly,
..Default::default()
},
128,

View File

@@ -419,7 +419,7 @@ impl AttachmentImage {
let create_info = AllocationCreateInfo {
requirements,
allocation_type: AllocationType::NonLinear,
-usage: MemoryUsage::GpuOnly,
+usage: MemoryUsage::DeviceOnly,
allocate_preference: MemoryAllocatePreference::Unknown,
dedicated_allocation: Some(DedicatedAllocation::Image(&raw_image)),
..Default::default()
@@ -520,7 +520,10 @@ impl AttachmentImage {
)?;
let requirements = raw_image.memory_requirements()[0];
let memory_type_index = allocator
-.find_memory_type_index(requirements.memory_type_bits, MemoryUsage::GpuOnly.into())
+.find_memory_type_index(
+requirements.memory_type_bits,
+MemoryUsage::DeviceOnly.into(),
+)
.expect("failed to find a suitable memory type");
match unsafe {

View File

@@ -141,7 +141,7 @@ impl ImmutableImage {
let create_info = AllocationCreateInfo {
requirements,
allocation_type: AllocationType::NonLinear,
-usage: MemoryUsage::GpuOnly,
+usage: MemoryUsage::DeviceOnly,
allocate_preference: MemoryAllocatePreference::Unknown,
dedicated_allocation: Some(DedicatedAllocation::Image(&raw_image)),
..Default::default()

View File

@@ -128,7 +128,7 @@ impl StorageImage {
let create_info = AllocationCreateInfo {
requirements,
allocation_type: AllocationType::NonLinear,
-usage: MemoryUsage::GpuOnly,
+usage: MemoryUsage::DeviceOnly,
allocate_preference: MemoryAllocatePreference::Unknown,
dedicated_allocation: Some(DedicatedAllocation::Image(&raw_image)),
..Default::default()
@@ -204,7 +204,10 @@ impl StorageImage {
)?;
let requirements = raw_image.memory_requirements()[0];
let memory_type_index = allocator
-.find_memory_type_index(requirements.memory_type_bits, MemoryUsage::GpuOnly.into())
+.find_memory_type_index(
+requirements.memory_type_bits,
+MemoryUsage::DeviceOnly.into(),
+)
.expect("failed to find a suitable memory type");
match unsafe {
@@ -322,7 +325,10 @@ impl StorageImage {
let requirements = image.memory_requirements()[0];
let memory_type_index = allocator
-.find_memory_type_index(requirements.memory_type_bits, MemoryUsage::GpuOnly.into())
+.find_memory_type_index(
+requirements.memory_type_bits,
+MemoryUsage::DeviceOnly.into(),
+)
.expect("failed to find a suitable memory type");
assert!(device.enabled_extensions().khr_external_memory_fd);

View File

@@ -351,7 +351,7 @@ impl From<MemoryUsage> for MemoryTypeFilter {
let mut filter = Self::default();
match usage {
-MemoryUsage::GpuOnly => {
+MemoryUsage::DeviceOnly => {
filter.preferred_flags |= MemoryPropertyFlags::DEVICE_LOCAL;
filter.not_preferred_flags |= MemoryPropertyFlags::HOST_VISIBLE;
}
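
To make the effect of this `From` impl concrete, here is a small sketch (not from the commit) that converts the renamed variant into a `MemoryTypeFilter` and checks the two properties set in the arm above. The import paths, and the assumption that `preferred_flags` and `not_preferred_flags` are public fields with a `contains` check on the flags type, are mine; the flag semantics come straight from the match arm.

// Sketch only: paths and field visibility are assumptions.
use vulkano::memory::allocator::{MemoryTypeFilter, MemoryUsage};
use vulkano::memory::MemoryPropertyFlags;

fn main() {
    let filter: MemoryTypeFilter = MemoryUsage::DeviceOnly.into();

    // Mirrors the arm above: device-local is preferred, host-visible is avoided.
    assert!(filter.preferred_flags.contains(MemoryPropertyFlags::DEVICE_LOCAL));
    assert!(filter.not_preferred_flags.contains(MemoryPropertyFlags::HOST_VISIBLE));
}
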
@@ -410,7 +410,7 @@ pub struct AllocationCreateInfo<'d> {
/// The intended usage for the allocation.
///
-/// The default value is [`MemoryUsage::GpuOnly`].
+/// The default value is [`MemoryUsage::DeviceOnly`].
pub usage: MemoryUsage,
/// How eager the allocator should be to allocate [`DeviceMemory`].
@@ -450,7 +450,7 @@ impl Default for AllocationCreateInfo<'_> {
requires_dedicated_allocation: false,
},
allocation_type: AllocationType::Unknown,
-usage: MemoryUsage::GpuOnly,
+usage: MemoryUsage::DeviceOnly,
allocate_preference: MemoryAllocatePreference::Unknown,
dedicated_allocation: None,
_ne: crate::NonExhaustive(()),
@@ -460,14 +460,15 @@ impl Default for AllocationCreateInfo<'_> {
/// Describes how a memory allocation is going to be used.
///
-/// This is mostly an optimization, except for `MemoryUsage::GpuOnly` which will pick a memory type
-/// that is not CPU-accessible if such a type exists.
+/// This is mostly an optimization, except for `MemoryUsage::DeviceOnly` which will pick a memory
+/// type that is not host-accessible if such a type exists.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[non_exhaustive]
pub enum MemoryUsage {
-/// The memory is intended to only be used by the GPU.
+/// The memory is intended to only be used by the device.
///
-/// Prefers picking a memory type with the [`DEVICE_LOCAL`] flag and
-/// without the [`HOST_VISIBLE`] flag.
+/// Prefers picking a memory type with the [`DEVICE_LOCAL`] flag and without the
+/// [`HOST_VISIBLE`] flag.
///
/// This option is what you will always want to use unless the memory needs to be accessed by
/// the CPU, because a memory type that can only be accessed by the GPU is going to give the
@@ -477,16 +478,16 @@ pub enum MemoryUsage {
///
/// [`DEVICE_LOCAL`]: MemoryPropertyFlags::DEVICE_LOCAL
/// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
-GpuOnly,
+DeviceOnly,
-/// The memory is intended for upload to the GPU.
+/// The memory is intended for upload to the device.
///
/// Guarantees picking a memory type with the [`HOST_VISIBLE`] flag. Prefers picking one
/// without the [`HOST_CACHED`] flag and with the [`DEVICE_LOCAL`] flag.
///
/// This option is best suited for resources that need to be constantly updated by the CPU,
/// like vertex and index buffers for example. It is also neccessary for *staging buffers*,
-/// whose only purpose in life it is to get data into `device_local` memory or texels into an
+/// whose only purpose in life it is to get data into device-local memory or texels into an
/// optimal image.
///
/// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
@@ -494,14 +495,14 @@
/// [`DEVICE_LOCAL`]: MemoryPropertyFlags::DEVICE_LOCAL
Upload,
-/// The memory is intended for download from the GPU.
+/// The memory is intended for download from the device.
///
/// Guarantees picking a memory type with the [`HOST_VISIBLE`] flag. Prefers picking one with
/// the [`HOST_CACHED`] flag and without the [`DEVICE_LOCAL`] flag.
///
-/// This option is best suited if you're using the GPU for things other than rendering and you
-/// need to get the results back to the CPU. That might be compute shading, or image or video
-/// manipulation, or screenshotting for example.
+/// This option is best suited if you're using the device for things other than rendering and
+/// you need to get the results back to the host. That might be compute shading, or image or
+/// video manipulation, or screenshotting for example.
///
/// [`HOST_VISIBLE`]: MemoryPropertyFlags::HOST_VISIBLE
/// [`HOST_CACHED`]: MemoryPropertyFlags::HOST_CACHED
@@ -511,6 +512,7 @@
/// Describes whether allocating [`DeviceMemory`] is desired.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[non_exhaustive]
pub enum MemoryAllocatePreference {
/// There is no known preference, let the allocator decide.
Unknown,
@@ -1183,7 +1185,7 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
/// `create_info.requirements.size` doesn't match the memory requirements of the resource.
/// - Panics if finding a suitable memory type failed. This only happens if the
/// `create_info.requirements` correspond to those of an optimal image but
-/// `create_info.usage` is not [`MemoryUsage::GpuOnly`].
+/// `create_info.usage` is not [`MemoryUsage::DeviceOnly`].
///
/// # Errors
///
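
Since `MemoryUsage` and `MemoryAllocatePreference` are now `#[non_exhaustive]`, matches in downstream crates have to carry a wildcard arm, which is what lets vulkano add variants later without a breaking change. Below is a hedged sketch of what that looks like for a consumer: `needs_staging_buffer` is a hypothetical helper and the import path is assumed, but the per-variant behaviour follows the doc comments above (device-only memory is generally not host-visible, so it is filled through a staging buffer, while `Upload` and `Download` never need one).

// Sketch only: `needs_staging_buffer` is hypothetical; the module path is assumed.
use vulkano::memory::allocator::MemoryUsage;

fn needs_staging_buffer(usage: MemoryUsage) -> bool {
    match usage {
        // Device-only memory is usually not host-visible, so data is uploaded via staging.
        MemoryUsage::DeviceOnly => true,
        // Host-visible by guarantee; no staging buffer required.
        MemoryUsage::Upload | MemoryUsage::Download => false,
        // Mandatory now that the enum is `#[non_exhaustive]`; be conservative about
        // variants added in future vulkano versions.
        _ => true,
    }
}

fn main() {
    assert!(needs_staging_buffer(MemoryUsage::DeviceOnly));
    assert!(!needs_staging_buffer(MemoryUsage::Upload));
}
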

View File

@@ -763,6 +763,7 @@ impl From<AllocationCreateInfo<'_>> for SuballocationCreateInfo {
/// [suballocator]: Suballocator
/// [buffer-image granularity]: super#buffer-image-granularity
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[non_exhaustive]
pub enum AllocationType {
/// The type of resource is unknown, it might be either linear or non-linear. What this means is
/// that allocations created with this type must always be aligned to the buffer-image