Mirror of https://github.com/vulkano-rs/vulkano.git (synced 2024-11-25 16:25:31 +00:00)
Memory allocation revamp (#1997)

* Add suballocators
* Add tests
* Retroactively abort `PoolAllocatorCreateInfo`
* Use const generic for `PoolAllocator`'s block size
* Move `new` and `try_into_region` to `Suballocator`
* Move `allocate_unchecked` to `Suballocator`
* Fix constructor visibility
* Move `free_size` to `Suballocator`
* Small fixes
* Merge `BumpAllocator` and `SyncBumpAllocator`
* Restrict `AllocParent::None` to tests
* Rewording
* Add dedicated allocations
* Add `Suballocator::cleanup`
* Make `free_size`s lock-free
* Add `Suballocator::largest_free_chunk`
* Add `ArrayVec`
* Remove useless `unsafe`
* Add `MemoryAllocator`
* Add `GenericMemoryAllocator`
* Small fixes
* Retroactively abort `largest_free_chunk`
* Small docs adjustments
* Rearrange
* Add `MemoryAlloc::mapped_ptr`
* Fix oopsie
* Add support for non-coherent mapped memory
* Add `DeviceOwned` subtrait to `Suballocator`
* Move granularities to suballocators, fix tests
* Add cache control
* Fix oopsie where alignment of 0 is possible
* Store `Arc<DeviceMemory>` in suballocators
* Add `MemoryAllocator::create_{buffer, image}`
* Remove `MemoryPool`
* Fix examples
* Remove `MemoryAlloc::{memory, memory_type_index}`
* Minor improvement to `AllocationCreationError`
* Add some example docs
* Add support for external memory
* Swicheroo
* Small fix
* Shorten sm names, cache atom size in suballocators
* Add config for allocation type to generic allocatr
* Engrish
* Fix a big oopsie
* Spliteroo
* Inglisch
Parent: f079c2bc08
Commit: 34b709547f
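The diff that follows ports every example (plus vulkano-util) to the reworked allocation API: instead of handing an `Arc<Device>` to buffer and image constructors, each example now builds a `StandardMemoryAllocator` once and passes a reference to it. A minimal sketch of the new calling convention, pieced together from the hunks below; the trailing `host_cached` flag and the data iterator are assumptions about parameters this diff does not touch:

use std::sync::Arc;
use vulkano::{
    buffer::{BufferUsage, CpuAccessibleBuffer},
    device::Device,
    memory::allocator::StandardMemoryAllocator,
};

// Sketch: create the allocator once per device, then pass it wherever a
// buffer or image constructor used to take the device directly.
fn create_storage_buffer(device: Arc<Device>) -> Arc<CpuAccessibleBuffer<[u32]>> {
    let memory_allocator = StandardMemoryAllocator::new_default(device);

    CpuAccessibleBuffer::from_iter(
        &memory_allocator,               // was `device.clone()` before this commit
        BufferUsage {
            storage_buffer: true,
            ..BufferUsage::empty()
        },
        false,        // host_cached: assumed, unchanged by this diff
        0..65536u32,  // data iterator, mirroring the compute-shader examples
    )
    .unwrap()
}
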
@@ -25,6 +25,7 @@ use vulkano::{
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
},
instance::{Instance, InstanceCreateInfo},
memory::allocator::StandardMemoryAllocator,
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
sync::{self, GpuFuture},
VulkanLibrary,
@@ -144,6 +145,7 @@ fn main() {
.unwrap()
};

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());

@@ -153,7 +155,7 @@ fn main() {
let data_iter = 0..65536u32;
// Builds the buffer and fills it with this iterator.
CpuAccessibleBuffer::from_iter(
device.clone(),
&memory_allocator,
BufferUsage {
storage_buffer: true,
..BufferUsage::empty()

@@ -36,6 +36,7 @@ use vulkano::{
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
impl_vertex,
instance::{Instance, InstanceCreateInfo},
memory::allocator::StandardMemoryAllocator,
pipeline::{
graphics::{
input_assembly::InputAssemblyState,
@@ -169,8 +170,10 @@ fn main() {
.unwrap()
};

let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));

// Vertex Buffer Pool
let buffer_pool: CpuBufferPool<Vertex> = CpuBufferPool::vertex_buffer(device.clone());
let buffer_pool: CpuBufferPool<Vertex> = CpuBufferPool::vertex_buffer(memory_allocator);

mod vs {
vulkano_shaders::shader! {

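The hunks above show the `CpuBufferPool` variant of the same change: the pool is now constructed from an `Arc<StandardMemoryAllocator>` rather than from the device. A sketch under the assumption of a trivial `Pod` vertex type (the real `Vertex` lives elsewhere in that example):

use std::sync::Arc;
use bytemuck::{Pod, Zeroable};
use vulkano::{
    buffer::CpuBufferPool,
    device::Device,
    memory::allocator::StandardMemoryAllocator,
};

// Hypothetical stand-in for the example's vertex type.
#[repr(C)]
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
struct Vertex {
    position: [f32; 2],
}

fn make_vertex_buffer_pool(device: Arc<Device>) -> CpuBufferPool<Vertex> {
    // The pool keeps shared ownership of the allocator, so it is wrapped in an Arc.
    let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
    CpuBufferPool::vertex_buffer(memory_allocator)
}
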
@@ -24,6 +24,7 @@ use vulkano::{
},
Instance, InstanceCreateInfo, InstanceExtensions,
},
memory::allocator::StandardMemoryAllocator,
VulkanLibrary,
};

@@ -175,7 +176,7 @@ fn main() {
.expect("failed to create device");
let queue = queues.next().unwrap();

let command_buffer_allocator = StandardCommandBufferAllocator::new(device);
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
let mut command_buffer_builder = AutoCommandBufferBuilder::primary(
&command_buffer_allocator,
queue.queue_family_index(),
@@ -191,7 +192,9 @@ fn main() {
array_layers: 1,
};
static DATA: [[u8; 4]; 4096 * 4096] = [[0; 4]; 4096 * 4096];
let memory_allocator = StandardMemoryAllocator::new_default(device);
let _ = ImmutableImage::from_iter(
&memory_allocator,
DATA.iter().copied(),
dimensions,
MipmapsCount::One,

@@ -21,6 +21,7 @@ use vulkano::{
device::Queue,
image::ImageViewAbstract,
impl_vertex,
memory::allocator::MemoryAllocator,
pipeline::{
graphics::{
color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState},
@@ -48,6 +49,7 @@ impl AmbientLightingSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
) -> AmbientLightingSystem {
@@ -66,7 +68,7 @@ impl AmbientLightingSystem {
];
let vertex_buffer = {
CpuAccessibleBuffer::from_iter(
gfx_queue.device().clone(),
memory_allocator,
BufferUsage {
vertex_buffer: true,
..BufferUsage::empty()

@@ -22,6 +22,7 @@ use vulkano::{
device::Queue,
image::ImageViewAbstract,
impl_vertex,
memory::allocator::MemoryAllocator,
pipeline::{
graphics::{
color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState},
@@ -49,6 +50,7 @@ impl DirectionalLightingSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
) -> DirectionalLightingSystem {
@@ -67,7 +69,7 @@ impl DirectionalLightingSystem {
];
let vertex_buffer = {
CpuAccessibleBuffer::from_iter(
gfx_queue.device().clone(),
memory_allocator,
BufferUsage {
vertex_buffer: true,
..BufferUsage::empty()

@@ -22,6 +22,7 @@ use vulkano::{
device::Queue,
image::ImageViewAbstract,
impl_vertex,
memory::allocator::MemoryAllocator,
pipeline::{
graphics::{
color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState},
@@ -48,6 +49,7 @@ impl PointLightingSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
) -> PointLightingSystem {
@@ -66,7 +68,7 @@ impl PointLightingSystem {
];
let vertex_buffer = {
CpuAccessibleBuffer::from_iter(
gfx_queue.device().clone(),
memory_allocator,
BufferUsage {
vertex_buffer: true,
..BufferUsage::empty()

@@ -24,6 +24,7 @@ use vulkano::{
device::Queue,
format::Format,
image::{view::ImageView, AttachmentImage, ImageAccess, ImageUsage, ImageViewAbstract},
memory::allocator::StandardMemoryAllocator,
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
sync::GpuFuture,
};
@@ -38,6 +39,7 @@ pub struct FrameSystem {
// in of a change in the dimensions.
render_pass: Arc<RenderPass>,

memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,

// Intermediate render target that will contain the albedo of each pixel of the scene.
@@ -71,6 +73,7 @@ impl FrameSystem {
pub fn new(
gfx_queue: Arc<Queue>,
final_output_format: Format,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
) -> FrameSystem {
// Creating the render pass.
@@ -152,7 +155,7 @@ impl FrameSystem {
// These images will be replaced the first time we call `frame()`.
let diffuse_buffer = ImageView::new_default(
AttachmentImage::with_usage(
gfx_queue.device().clone(),
&*memory_allocator,
[1, 1],
Format::A2B10G10R10_UNORM_PACK32,
ImageUsage {
@@ -166,7 +169,7 @@ impl FrameSystem {
.unwrap();
let normals_buffer = ImageView::new_default(
AttachmentImage::with_usage(
gfx_queue.device().clone(),
&*memory_allocator,
[1, 1],
Format::R16G16B16A16_SFLOAT,
ImageUsage {
@@ -180,7 +183,7 @@ impl FrameSystem {
.unwrap();
let depth_buffer = ImageView::new_default(
AttachmentImage::with_usage(
gfx_queue.device().clone(),
&*memory_allocator,
[1, 1],
Format::D16_UNORM,
ImageUsage {
@@ -203,18 +206,21 @@ impl FrameSystem {
let ambient_lighting_system = AmbientLightingSystem::new(
gfx_queue.clone(),
lighting_subpass.clone(),
&*memory_allocator,
command_buffer_allocator.clone(),
descriptor_set_allocator.clone(),
);
let directional_lighting_system = DirectionalLightingSystem::new(
gfx_queue.clone(),
lighting_subpass.clone(),
&*memory_allocator,
command_buffer_allocator.clone(),
descriptor_set_allocator.clone(),
);
let point_lighting_system = PointLightingSystem::new(
gfx_queue.clone(),
lighting_subpass,
&*memory_allocator,
command_buffer_allocator.clone(),
descriptor_set_allocator,
);
@@ -222,6 +228,7 @@ impl FrameSystem {
FrameSystem {
gfx_queue,
render_pass,
memory_allocator,
command_buffer_allocator,
diffuse_buffer,
normals_buffer,
@@ -270,7 +277,7 @@ impl FrameSystem {
// render pass their content becomes undefined.
self.diffuse_buffer = ImageView::new_default(
AttachmentImage::with_usage(
self.gfx_queue.device().clone(),
&*self.memory_allocator,
img_dims,
Format::A2B10G10R10_UNORM_PACK32,
ImageUsage {
@@ -284,7 +291,7 @@ impl FrameSystem {
.unwrap();
self.normals_buffer = ImageView::new_default(
AttachmentImage::with_usage(
self.gfx_queue.device().clone(),
&*self.memory_allocator,
img_dims,
Format::R16G16B16A16_SFLOAT,
ImageUsage {
@@ -298,7 +305,7 @@ impl FrameSystem {
.unwrap();
self.depth_buffer = ImageView::new_default(
AttachmentImage::with_usage(
self.gfx_queue.device().clone(),
&*self.memory_allocator,
img_dims,
Format::D16_UNORM,
ImageUsage {

@@ -30,7 +30,7 @@ use crate::{
triangle_draw_system::TriangleDrawSystem,
};
use cgmath::{Matrix4, SquareMatrix, Vector3};
use std::rc::Rc;
use std::{rc::Rc, sync::Arc};
use vulkano::{
command_buffer::allocator::StandardCommandBufferAllocator,
device::{
@@ -38,6 +38,7 @@ use vulkano::{
},
image::{view::ImageView, ImageUsage},
instance::{Instance, InstanceCreateInfo},
memory::allocator::StandardMemoryAllocator,
swapchain::{
acquire_next_image, AcquireError, Swapchain, SwapchainCreateInfo, SwapchainCreationError,
SwapchainPresentInfo,
@@ -164,17 +165,20 @@ fn main() {
(swapchain, images)
};

let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let command_buffer_allocator = Rc::new(StandardCommandBufferAllocator::new(device.clone()));

// Here is the basic initialization for the deferred system.
let mut frame_system = FrameSystem::new(
queue.clone(),
swapchain.image_format(),
memory_allocator.clone(),
command_buffer_allocator.clone(),
);
let triangle_draw_system = TriangleDrawSystem::new(
queue.clone(),
frame_system.deferred_subpass(),
&memory_allocator,
command_buffer_allocator,
);

@@ -17,6 +17,7 @@ use vulkano::{
},
device::Queue,
impl_vertex,
memory::allocator::StandardMemoryAllocator,
pipeline::{
graphics::{
depth_stencil::DepthStencilState,
@@ -42,6 +43,7 @@ impl TriangleDrawSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &StandardMemoryAllocator,
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
) -> TriangleDrawSystem {
let vertices = [
@@ -57,7 +59,7 @@ impl TriangleDrawSystem {
];
let vertex_buffer = {
CpuAccessibleBuffer::from_iter(
gfx_queue.device().clone(),
memory_allocator,
BufferUsage {
vertex_buffer: true,
..BufferUsage::empty()

@@ -28,6 +28,7 @@ use vulkano::{
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
},
instance::{Instance, InstanceCreateInfo},
memory::allocator::StandardMemoryAllocator,
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
sync::{self, GpuFuture},
VulkanLibrary,
@@ -131,6 +132,7 @@ fn main() {
)
.unwrap();

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());

@@ -164,7 +166,7 @@ fn main() {
};

let input_buffer = CpuAccessibleBuffer::from_iter(
device.clone(),
&memory_allocator,
BufferUsage {
uniform_buffer: true,
..BufferUsage::empty()
@@ -175,7 +177,7 @@ fn main() {
.unwrap();

let output_buffer = CpuAccessibleBuffer::from_iter(
device.clone(),
&memory_allocator,
BufferUsage {
storage_buffer: true,
..BufferUsage::empty()

@@ -30,6 +30,7 @@ use vulkano::{
format::Format,
image::{view::ImageView, ImageDimensions, StorageImage},
instance::{Instance, InstanceCreateInfo, InstanceExtensions},
memory::allocator::StandardMemoryAllocator,
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
sync::{self, GpuFuture},
VulkanLibrary,
@@ -198,11 +199,12 @@ fn main() {
)
.unwrap();

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());

let image = StorageImage::new(
device.clone(),
&memory_allocator,
ImageDimensions::Dim2d {
width: 1024,
height: 1024,
@@ -223,7 +225,7 @@ fn main() {
.unwrap();

let buf = CpuAccessibleBuffer::from_iter(
device.clone(),
&memory_allocator,
BufferUsage {
transfer_dst: true,
..BufferUsage::empty()

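Image creation follows the same pattern as buffers: the hunks above swap the `device.clone()` argument of `StorageImage::new` for a reference to the allocator. A hedged sketch of the resulting call, with the dimensions, format, and queue-family argument taken from the examples in this diff and the `array_layers` value assumed:

use std::sync::Arc;
use vulkano::{
    device::Queue,
    format::Format,
    image::{ImageDimensions, StorageImage},
    memory::allocator::StandardMemoryAllocator,
};

// Sketch only: `memory_allocator` and `queue` come from the surrounding example.
fn create_storage_image(
    memory_allocator: &StandardMemoryAllocator,
    queue: &Arc<Queue>,
) -> Arc<StorageImage> {
    StorageImage::new(
        memory_allocator,             // was `device.clone()` before this commit
        ImageDimensions::Dim2d {
            width: 1024,
            height: 1024,
            array_layers: 1,          // assumed; not visible in the hunk above
        },
        Format::R8G8B8A8_UNORM,
        [queue.queue_family_index()], // queue family indices, as in the texture examples below
    )
    .unwrap()
}
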
@@ -35,6 +35,7 @@ mod linux {
debug::{DebugUtilsMessenger, DebugUtilsMessengerCreateInfo},
Instance, InstanceCreateInfo, InstanceExtensions,
},
memory::allocator::StandardMemoryAllocator,
pipeline::{
graphics::{
color_blend::ColorBlendState,
@@ -94,11 +95,12 @@ mod linux {
mut framebuffers,
sampler,
pipeline,
memory_allocator,
vertex_buffer,
) = vk_setup(display, &event_loop);

let image = StorageImage::new_with_exportable_fd(
device.clone(),
&memory_allocator,
vulkano::image::ImageDimensions::Dim2d {
width: 200,
height: 200,
@@ -416,6 +418,7 @@ mod linux {
Vec<Arc<Framebuffer>>,
Arc<vulkano::sampler::Sampler>,
Arc<GraphicsPipeline>,
StandardMemoryAllocator,
Arc<CpuAccessibleBuffer<[Vertex]>>,
) {
let library = VulkanLibrary::new().unwrap();
@@ -561,6 +564,8 @@ mod linux {
.unwrap()
};

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());

let vertices = [
Vertex {
position: [-0.5, -0.5],
@@ -576,7 +581,7 @@ mod linux {
},
];
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
device.clone(),
&memory_allocator,
BufferUsage {
vertex_buffer: true,
..BufferUsage::empty()
@@ -652,6 +657,7 @@ mod linux {
framebuffers,
sampler,
pipeline,
memory_allocator,
vertex_buffer,
)
}

@ -30,6 +30,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
color_blend::ColorBlendState,
|
||||
@ -160,6 +161,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -182,7 +185,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -237,7 +240,7 @@ fn main() {
|
||||
reader.next_frame(&mut image_data).unwrap();
|
||||
|
||||
let image = StorageImage::new(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
dimensions,
|
||||
Format::R8G8B8A8_UNORM,
|
||||
[queue.queue_family_index()],
|
||||
@ -245,7 +248,7 @@ fn main() {
|
||||
.unwrap();
|
||||
|
||||
let buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -28,6 +28,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
color_blend::ColorBlendState,
|
||||
@ -158,6 +159,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -180,7 +183,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -234,6 +237,7 @@ fn main() {
|
||||
reader.next_frame(&mut image_data).unwrap();
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
image_data,
|
||||
dimensions,
|
||||
MipmapsCount::One,
|
||||
|
@ -37,6 +37,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
color_blend::ColorBlendState,
|
||||
@ -164,6 +165,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -186,7 +189,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -240,6 +243,7 @@ fn main() {
|
||||
reader.next_frame(&mut image_data).unwrap();
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
image_data,
|
||||
dimensions,
|
||||
MipmapsCount::One,
|
||||
|
@ -41,6 +41,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::{MemoryUsage, StandardMemoryAllocator},
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -254,23 +255,27 @@ fn main() {
|
||||
let fs = fs::load(device.clone()).unwrap();
|
||||
let cs = cs::load(device.clone()).unwrap();
|
||||
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
|
||||
|
||||
// Each frame we generate a new set of vertices and each frame we need a new DrawIndirectCommand struct to
|
||||
// set the number of vertices to draw
|
||||
let indirect_args_pool: CpuBufferPool<DrawIndirectCommand> = CpuBufferPool::new(
|
||||
device.clone(),
|
||||
memory_allocator.clone(),
|
||||
BufferUsage {
|
||||
indirect_buffer: true,
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
);
|
||||
let vertex_pool: CpuBufferPool<Vertex> = CpuBufferPool::new(
|
||||
device.clone(),
|
||||
memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
);
|
||||
|
||||
let compute_pipeline = ComputePipeline::new(
|
||||
|
@ -26,6 +26,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -175,6 +176,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
// We now create a buffer that will store the shape of our triangle.
|
||||
// This triangle is identical to the one in the `triangle.rs` example.
|
||||
let vertices = [
|
||||
@ -190,7 +193,7 @@ fn main() {
|
||||
];
|
||||
let vertex_buffer = {
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -225,7 +228,7 @@ fn main() {
|
||||
data
|
||||
};
|
||||
let instance_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -15,6 +15,7 @@ use std::{rc::Rc, sync::Arc};
|
||||
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
|
||||
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
|
||||
use vulkano::device::Queue;
|
||||
use vulkano::memory::allocator::StandardMemoryAllocator;
|
||||
use vulkano::sync::GpuFuture;
|
||||
use vulkano_util::renderer::{DeviceImageView, VulkanoWindowRenderer};
|
||||
use vulkano_util::window::WindowDescriptor;
|
||||
@ -60,6 +61,9 @@ pub struct FractalApp {
|
||||
|
||||
impl FractalApp {
|
||||
pub fn new(gfx_queue: Arc<Queue>, image_format: vulkano::format::Format) -> FractalApp {
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(
|
||||
gfx_queue.device().clone(),
|
||||
));
|
||||
let command_buffer_allocator = Rc::new(StandardCommandBufferAllocator::new(
|
||||
gfx_queue.device().clone(),
|
||||
));
|
||||
@ -70,11 +74,13 @@ impl FractalApp {
|
||||
FractalApp {
|
||||
fractal_pipeline: FractalComputePipeline::new(
|
||||
gfx_queue.clone(),
|
||||
memory_allocator.clone(),
|
||||
command_buffer_allocator.clone(),
|
||||
descriptor_set_allocator.clone(),
|
||||
),
|
||||
place_over_frame: RenderPassPlaceOverFrame::new(
|
||||
gfx_queue,
|
||||
&*memory_allocator,
|
||||
command_buffer_allocator,
|
||||
descriptor_set_allocator,
|
||||
image_format,
|
||||
|
@ -21,6 +21,7 @@ use vulkano::{
|
||||
},
|
||||
device::Queue,
|
||||
image::ImageAccess,
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
sync::GpuFuture,
|
||||
};
|
||||
@ -29,6 +30,7 @@ use vulkano_util::renderer::DeviceImageView;
|
||||
pub struct FractalComputePipeline {
|
||||
queue: Arc<Queue>,
|
||||
pipeline: Arc<ComputePipeline>,
|
||||
memory_allocator: Arc<StandardMemoryAllocator>,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
palette: Arc<CpuAccessibleBuffer<[[f32; 4]]>>,
|
||||
@ -39,6 +41,7 @@ pub struct FractalComputePipeline {
|
||||
impl FractalComputePipeline {
|
||||
pub fn new(
|
||||
queue: Arc<Queue>,
|
||||
memory_allocator: Arc<StandardMemoryAllocator>,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
) -> FractalComputePipeline {
|
||||
@ -53,7 +56,7 @@ impl FractalComputePipeline {
|
||||
];
|
||||
let palette_size = colors.len() as i32;
|
||||
let palette = CpuAccessibleBuffer::from_iter(
|
||||
queue.device().clone(),
|
||||
&*memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -79,6 +82,7 @@ impl FractalComputePipeline {
|
||||
FractalComputePipeline {
|
||||
queue,
|
||||
pipeline,
|
||||
memory_allocator,
|
||||
command_buffer_allocator,
|
||||
descriptor_set_allocator,
|
||||
palette,
|
||||
@ -98,7 +102,7 @@ impl FractalComputePipeline {
|
||||
colors.push([r, g, b, a]);
|
||||
}
|
||||
self.palette = CpuAccessibleBuffer::from_iter(
|
||||
self.queue.device().clone(),
|
||||
&*self.memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -21,6 +21,7 @@ use vulkano::{
|
||||
device::Queue,
|
||||
image::ImageViewAbstract,
|
||||
impl_vertex,
|
||||
memory::allocator::MemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -81,12 +82,13 @@ impl PixelsDrawPipeline {
|
||||
pub fn new(
|
||||
gfx_queue: Arc<Queue>,
|
||||
subpass: Subpass,
|
||||
memory_allocator: &impl MemoryAllocator,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
) -> PixelsDrawPipeline {
|
||||
let (vertices, indices) = textured_quad(2.0, 2.0);
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[TexturedVertex]>::from_iter(
|
||||
gfx_queue.device().clone(),
|
||||
memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -96,7 +98,7 @@ impl PixelsDrawPipeline {
|
||||
)
|
||||
.unwrap();
|
||||
let index_buffer = CpuAccessibleBuffer::<[u32]>::from_iter(
|
||||
gfx_queue.device().clone(),
|
||||
memory_allocator,
|
||||
BufferUsage {
|
||||
index_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -18,6 +18,7 @@ use vulkano::{
|
||||
device::Queue,
|
||||
format::Format,
|
||||
image::ImageAccess,
|
||||
memory::allocator::MemoryAllocator,
|
||||
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
|
||||
sync::GpuFuture,
|
||||
};
|
||||
@ -34,6 +35,7 @@ pub struct RenderPassPlaceOverFrame {
|
||||
impl RenderPassPlaceOverFrame {
|
||||
pub fn new(
|
||||
gfx_queue: Arc<Queue>,
|
||||
memory_allocator: &impl MemoryAllocator,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
output_format: Format,
|
||||
@ -48,8 +50,8 @@ impl RenderPassPlaceOverFrame {
|
||||
}
|
||||
},
|
||||
pass: {
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
@ -57,6 +59,7 @@ impl RenderPassPlaceOverFrame {
|
||||
let pixels_draw_pipeline = PixelsDrawPipeline::new(
|
||||
gfx_queue.clone(),
|
||||
subpass,
|
||||
memory_allocator,
|
||||
command_buffer_allocator.clone(),
|
||||
descriptor_set_allocator,
|
||||
);
|
||||
|
@ -79,6 +79,7 @@ use vulkano::{
|
||||
image::{view::ImageView, AttachmentImage, ImageDimensions, SampleCount, StorageImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
multisample::MultisampleState,
|
||||
@ -151,13 +152,15 @@ fn main() {
|
||||
.unwrap();
|
||||
let queue = queues.next().unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
// Creating our intermediate multisampled image.
|
||||
//
|
||||
// As explained in the introduction, we pass the same dimensions and format as for the final
|
||||
// image. But we also pass the number of samples-per-pixel, which is 4 here.
|
||||
let intermediary = ImageView::new_default(
|
||||
AttachmentImage::transient_multisampled(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
[1024, 1024],
|
||||
SampleCount::Sample4,
|
||||
Format::R8G8B8A8_UNORM,
|
||||
@ -168,7 +171,7 @@ fn main() {
|
||||
|
||||
// This is the final image that will receive the anti-aliased triangle.
|
||||
let image = StorageImage::new(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
ImageDimensions::Dim2d {
|
||||
width: 1024,
|
||||
height: 1024,
|
||||
@ -284,7 +287,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -314,10 +317,10 @@ fn main() {
|
||||
depth_range: 0.0..1.0,
|
||||
};
|
||||
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device);
|
||||
|
||||
let buf = CpuAccessibleBuffer::from_iter(
|
||||
device,
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -30,6 +30,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -180,6 +181,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -199,7 +202,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -14,6 +14,7 @@ use crate::{
|
||||
use std::{collections::HashMap, rc::Rc, sync::Arc};
|
||||
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
|
||||
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
|
||||
use vulkano::memory::allocator::StandardMemoryAllocator;
|
||||
use vulkano::{device::Queue, format::Format};
|
||||
use vulkano_util::context::{VulkanoConfig, VulkanoContext};
|
||||
use vulkano_util::window::{VulkanoWindows, WindowDescriptor};
|
||||
@ -31,6 +32,7 @@ impl RenderPipeline {
|
||||
size: [u32; 2],
|
||||
swapchain_format: Format,
|
||||
) -> RenderPipeline {
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(gfx_queue.device().clone());
|
||||
let command_buffer_allocator = Rc::new(StandardCommandBufferAllocator::new(
|
||||
gfx_queue.device().clone(),
|
||||
));
|
||||
@ -41,12 +43,14 @@ impl RenderPipeline {
|
||||
RenderPipeline {
|
||||
compute: GameOfLifeComputePipeline::new(
|
||||
compute_queue,
|
||||
&memory_allocator,
|
||||
command_buffer_allocator.clone(),
|
||||
descriptor_set_allocator.clone(),
|
||||
size,
|
||||
),
|
||||
place_over_frame: RenderPassPlaceOverFrame::new(
|
||||
gfx_queue,
|
||||
&memory_allocator,
|
||||
command_buffer_allocator,
|
||||
descriptor_set_allocator,
|
||||
swapchain_format,
|
||||
|
@ -13,6 +13,7 @@ use std::{rc::Rc, sync::Arc};
|
||||
use vulkano::command_buffer::allocator::StandardCommandBufferAllocator;
|
||||
use vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator;
|
||||
use vulkano::image::{ImageUsage, StorageImage};
|
||||
use vulkano::memory::allocator::MemoryAllocator;
|
||||
use vulkano::{
|
||||
buffer::{BufferUsage, CpuAccessibleBuffer},
|
||||
command_buffer::{AutoCommandBufferBuilder, CommandBufferUsage, PrimaryAutoCommandBuffer},
|
||||
@ -40,9 +41,12 @@ pub struct GameOfLifeComputePipeline {
|
||||
image: DeviceImageView,
|
||||
}
|
||||
|
||||
fn rand_grid(compute_queue: &Arc<Queue>, size: [u32; 2]) -> Arc<CpuAccessibleBuffer<[u32]>> {
|
||||
fn rand_grid(
|
||||
memory_allocator: &impl MemoryAllocator,
|
||||
size: [u32; 2],
|
||||
) -> Arc<CpuAccessibleBuffer<[u32]>> {
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
compute_queue.device().clone(),
|
||||
memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -58,12 +62,13 @@ fn rand_grid(compute_queue: &Arc<Queue>, size: [u32; 2]) -> Arc<CpuAccessibleBuf
|
||||
impl GameOfLifeComputePipeline {
|
||||
pub fn new(
|
||||
compute_queue: Arc<Queue>,
|
||||
memory_allocator: &impl MemoryAllocator,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
size: [u32; 2],
|
||||
) -> GameOfLifeComputePipeline {
|
||||
let life_in = rand_grid(&compute_queue, size);
|
||||
let life_out = rand_grid(&compute_queue, size);
|
||||
let life_in = rand_grid(memory_allocator, size);
|
||||
let life_out = rand_grid(memory_allocator, size);
|
||||
|
||||
let compute_life_pipeline = {
|
||||
let shader = compute_life_cs::load(compute_queue.device().clone()).unwrap();
|
||||
@ -78,6 +83,7 @@ impl GameOfLifeComputePipeline {
|
||||
};
|
||||
|
||||
let image = StorageImage::general_purpose_image_view(
|
||||
memory_allocator,
|
||||
compute_queue.clone(),
|
||||
size,
|
||||
Format::R8G8B8A8_UNORM,
|
||||
|
@ -21,6 +21,7 @@ use vulkano::{
|
||||
device::Queue,
|
||||
image::ImageViewAbstract,
|
||||
impl_vertex,
|
||||
memory::allocator::MemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -81,12 +82,13 @@ impl PixelsDrawPipeline {
|
||||
pub fn new(
|
||||
gfx_queue: Arc<Queue>,
|
||||
subpass: Subpass,
|
||||
memory_allocator: &impl MemoryAllocator,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
) -> PixelsDrawPipeline {
|
||||
let (vertices, indices) = textured_quad(2.0, 2.0);
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[TexturedVertex]>::from_iter(
|
||||
gfx_queue.device().clone(),
|
||||
memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -96,7 +98,7 @@ impl PixelsDrawPipeline {
|
||||
)
|
||||
.unwrap();
|
||||
let index_buffer = CpuAccessibleBuffer::<[u32]>::from_iter(
|
||||
gfx_queue.device().clone(),
|
||||
memory_allocator,
|
||||
BufferUsage {
|
||||
index_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -18,6 +18,7 @@ use vulkano::{
|
||||
device::Queue,
|
||||
format::Format,
|
||||
image::ImageAccess,
|
||||
memory::allocator::MemoryAllocator,
|
||||
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
|
||||
sync::GpuFuture,
|
||||
};
|
||||
@ -34,6 +35,7 @@ pub struct RenderPassPlaceOverFrame {
|
||||
impl RenderPassPlaceOverFrame {
|
||||
pub fn new(
|
||||
gfx_queue: Arc<Queue>,
|
||||
memory_allocator: &impl MemoryAllocator,
|
||||
command_buffer_allocator: Rc<StandardCommandBufferAllocator>,
|
||||
descriptor_set_allocator: Rc<StandardDescriptorSetAllocator>,
|
||||
output_format: Format,
|
||||
@ -48,8 +50,8 @@ impl RenderPassPlaceOverFrame {
|
||||
}
|
||||
},
|
||||
pass: {
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
color: [color],
|
||||
depth_stencil: {}
|
||||
}
|
||||
)
|
||||
.unwrap();
|
||||
@ -57,6 +59,7 @@ impl RenderPassPlaceOverFrame {
|
||||
let pixels_draw_pipeline = PixelsDrawPipeline::new(
|
||||
gfx_queue.clone(),
|
||||
subpass,
|
||||
memory_allocator,
|
||||
command_buffer_allocator.clone(),
|
||||
descriptor_set_allocator,
|
||||
);
|
||||
|
@ -32,6 +32,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo, InstanceExtensions},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -129,8 +130,10 @@ fn main() {
|
||||
|
||||
let queue = queues.next().unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
let image = StorageImage::with_usage(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
ImageDimensions::Dim2d {
|
||||
width: 512,
|
||||
height: 512,
|
||||
@ -168,7 +171,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -281,7 +284,7 @@ fn main() {
|
||||
|
||||
let create_buffer = || {
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
@ -351,7 +354,7 @@ fn main() {
|
||||
|
||||
let command_buffer = builder.build().unwrap();
|
||||
|
||||
let future = sync::now(device.clone())
|
||||
let future = sync::now(device)
|
||||
.then_execute(queue, command_buffer)
|
||||
.unwrap()
|
||||
.then_signal_fence_and_flush()
|
||||
|
@ -20,13 +20,13 @@ use vulkano::{
|
||||
RenderPassBeginInfo, SubpassContents,
|
||||
},
|
||||
device::{
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceOwned,
|
||||
QueueCreateInfo,
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
|
||||
},
|
||||
format::Format,
|
||||
image::{view::ImageView, AttachmentImage, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
depth_stencil::DepthStencilState,
|
||||
@ -154,6 +154,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -209,7 +211,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -325,7 +327,12 @@ fn main() {
|
||||
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
let mut framebuffers = window_size_dependent_setup(&images, render_pass.clone(), &mut viewport);
|
||||
let mut framebuffers = window_size_dependent_setup(
|
||||
&images,
|
||||
render_pass.clone(),
|
||||
&mut viewport,
|
||||
&memory_allocator,
|
||||
);
|
||||
|
||||
let mut recreate_swapchain = false;
|
||||
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
|
||||
@ -363,8 +370,12 @@ fn main() {
|
||||
};
|
||||
|
||||
swapchain = new_swapchain;
|
||||
framebuffers =
|
||||
window_size_dependent_setup(&new_images, render_pass.clone(), &mut viewport);
|
||||
framebuffers = window_size_dependent_setup(
|
||||
&new_images,
|
||||
render_pass.clone(),
|
||||
&mut viewport,
|
||||
&memory_allocator,
|
||||
);
|
||||
recreate_swapchain = false;
|
||||
}
|
||||
|
||||
@ -542,13 +553,14 @@ fn window_size_dependent_setup(
|
||||
images: &[Arc<SwapchainImage>],
|
||||
render_pass: Arc<RenderPass>,
|
||||
viewport: &mut Viewport,
|
||||
memory_allocator: &StandardMemoryAllocator,
|
||||
) -> Vec<Arc<Framebuffer>> {
|
||||
let dimensions = images[0].dimensions().width_height();
|
||||
viewport.dimensions = [dimensions[0] as f32, dimensions[1] as f32];
|
||||
|
||||
let depth_attachment = ImageView::new_default(
|
||||
AttachmentImage::with_usage(
|
||||
render_pass.device().clone(),
|
||||
memory_allocator,
|
||||
dimensions,
|
||||
Format::D16_UNORM,
|
||||
ImageUsage {
|
||||
|
@ -24,6 +24,7 @@ use vulkano::{
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
|
||||
},
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
sync::{self, GpuFuture},
|
||||
VulkanLibrary,
|
||||
@ -129,13 +130,14 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
let data_buffer = {
|
||||
let data_iter = 0..65536u32;
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -26,6 +26,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
color_blend::ColorBlendState,
|
||||
@ -154,6 +155,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -176,7 +179,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -229,6 +232,7 @@ fn main() {
|
||||
reader.next_frame(&mut image_data).unwrap();
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
image_data,
|
||||
dimensions,
|
||||
MipmapsCount::One,
|
||||
|
@ -33,6 +33,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -221,6 +222,8 @@ fn main() {
|
||||
|
||||
let mut recreate_swapchain = false;
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
let vertices = [
|
||||
Vertex {
|
||||
position: [-1.0, 1.0],
|
||||
@ -236,7 +239,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -33,6 +33,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
color_blend::ColorBlendState,
|
||||
@ -170,6 +171,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -242,7 +245,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -296,6 +299,7 @@ fn main() {
|
||||
reader.next_frame(&mut image_data).unwrap();
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
image_data,
|
||||
dimensions,
|
||||
MipmapsCount::One,
|
||||
@ -323,6 +327,7 @@ fn main() {
|
||||
reader.next_frame(&mut image_data).unwrap();
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
image_data,
|
||||
dimensions,
|
||||
MipmapsCount::One,
|
||||
|
@ -23,6 +23,7 @@ use vulkano::{
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
|
||||
},
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
sync::{self, GpuFuture},
|
||||
VulkanLibrary,
|
||||
@ -116,6 +117,7 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
@ -123,7 +125,7 @@ fn main() {
|
||||
// we intitialize half of the array and leave the other half to 0, we will use copy later to fill it
|
||||
let data_iter = (0..65536u32).map(|n| if n < 65536 / 2 { n } else { 0 });
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
transfer_src: true,
|
||||
|
@ -23,6 +23,7 @@ use vulkano::{
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
|
||||
},
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
sync::{self, GpuFuture},
|
||||
VulkanLibrary,
|
||||
@ -124,13 +125,14 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
let data_buffer = {
|
||||
let data_iter = 0..65536u32;
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -41,6 +41,7 @@ use vulkano::{
|
||||
QueueCreateInfo,
|
||||
},
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
sync::{self, GpuFuture},
|
||||
VulkanLibrary,
|
||||
@ -237,6 +238,7 @@ fn main() {
|
||||
future.wait(None).unwrap();
|
||||
}
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
|
||||
|
||||
@ -244,7 +246,7 @@ fn main() {
|
||||
let data_buffer = {
|
||||
let data_iter = 0..65536u32;
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -30,6 +30,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageUsage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::{InputAssemblyState, PrimitiveTopology},
|
||||
@ -317,6 +318,7 @@ fn main() {
|
||||
let vs = vs::load(device.clone()).unwrap();
|
||||
let fs = fs::load(device.clone()).unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
@ -341,7 +343,7 @@ fn main() {
|
||||
|
||||
// Create a CPU accessible buffer initialized with the vertex data.
|
||||
let temporary_accessible_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
@ -353,7 +355,7 @@ fn main() {
|
||||
|
||||
// Create a buffer array on the GPU with enough space for `PARTICLE_COUNT` number of `Vertex`.
|
||||
let device_local_buffer = DeviceLocalBuffer::<[Vertex]>::array(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
PARTICLE_COUNT as vulkano::DeviceSize,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
|
@ -21,6 +21,7 @@ use vulkano::{
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
|
||||
},
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
sync::{self, GpuFuture},
|
||||
VulkanLibrary,
|
||||
@ -125,13 +126,14 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
let data_buffer = {
|
||||
let data_iter = 0..65536u32;
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -20,11 +20,13 @@ use vulkano::{
|
||||
allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet,
|
||||
},
|
||||
device::{
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
|
||||
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceOwned,
|
||||
QueueCreateInfo,
|
||||
},
|
||||
format::Format,
|
||||
image::{view::ImageView, AttachmentImage, ImageAccess, ImageUsage, SwapchainImage},
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::{MemoryUsage, StandardMemoryAllocator},
|
||||
pipeline::{
|
||||
graphics::{
|
||||
depth_stencil::DepthStencilState,
|
||||
@ -156,8 +158,10 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
|
||||
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&*memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -167,7 +171,7 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
let normals_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&*memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -177,7 +181,7 @@ fn main() {
|
||||
)
|
||||
.unwrap();
|
||||
let index_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&*memory_allocator,
|
||||
BufferUsage {
|
||||
index_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -188,11 +192,12 @@ fn main() {
|
||||
.unwrap();
|
||||
|
||||
let uniform_buffer = CpuBufferPool::<vs::ty::Data>::new(
|
||||
device.clone(),
|
||||
memory_allocator.clone(),
|
||||
BufferUsage {
|
||||
uniform_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
);
|
||||
|
||||
let vs = vs::load(device.clone()).unwrap();
|
||||
@ -221,7 +226,7 @@ fn main() {
|
||||
.unwrap();
|
||||
|
||||
let (mut pipeline, mut framebuffers) =
|
||||
window_size_dependent_setup(device.clone(), &vs, &fs, &images, render_pass.clone());
|
||||
window_size_dependent_setup(&memory_allocator, &vs, &fs, &images, render_pass.clone());
|
||||
let mut recreate_swapchain = false;
|
||||
|
||||
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
|
||||
@ -266,7 +271,7 @@ fn main() {
|
||||
|
||||
swapchain = new_swapchain;
|
||||
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
&vs,
|
||||
&fs,
|
||||
&new_images,
|
||||
@ -399,7 +404,7 @@ fn main() {
|
||||
|
||||
/// This method is called once during initialization, then again whenever the window is resized
|
||||
fn window_size_dependent_setup(
|
||||
device: Arc<Device>,
|
||||
memory_allocator: &StandardMemoryAllocator,
|
||||
vs: &ShaderModule,
|
||||
fs: &ShaderModule,
|
||||
images: &[Arc<SwapchainImage>],
|
||||
@ -408,7 +413,7 @@ fn window_size_dependent_setup(
|
||||
let dimensions = images[0].dimensions().width_height();
|
||||
|
||||
let depth_buffer = ImageView::new_default(
|
||||
AttachmentImage::transient(device.clone(), dimensions, Format::D16_UNORM).unwrap(),
|
||||
AttachmentImage::transient(memory_allocator, dimensions, Format::D16_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -449,7 +454,7 @@ fn window_size_dependent_setup(
|
||||
.fragment_shader(fs.entry_point("main").unwrap(), ())
|
||||
.depth_stencil_state(DepthStencilState::simple_depth_test())
|
||||
.render_pass(Subpass::from(render_pass, 0).unwrap())
|
||||
.build(device)
|
||||
.build(memory_allocator.device().clone())
|
||||
.unwrap();
|
||||
|
||||
(pipeline, framebuffers)
|
||||
|
@ -33,6 +33,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::{InputAssemblyState, PrimitiveTopology},
|
||||
@ -262,6 +263,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
#[repr(C)]
|
||||
struct Vertex {
|
||||
@ -299,7 +302,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -28,6 +28,7 @@ use vulkano::{
|
||||
},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
color_blend::ColorBlendState,
|
||||
@ -160,6 +161,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Copy, Debug, Default, Zeroable, Pod)]
|
||||
struct Vertex {
|
||||
@ -182,7 +185,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::<[Vertex]>::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
@ -244,6 +247,7 @@ fn main() {
|
||||
array_layers: 3,
|
||||
}; // Replace with your actual image array dimensions
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
image_array_data,
|
||||
dimensions,
|
||||
MipmapsCount::Log2,
|
||||
|
@ -36,6 +36,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -279,6 +280,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
// We now create a buffer that will store the shape of our triangle.
|
||||
// We use #[repr(C)] here to force rustc to not do anything funky with our data, although for this
|
||||
// particular example, it doesn't actually change the in-memory representation.
|
||||
@ -301,7 +304,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -30,6 +30,7 @@ use vulkano::{
|
||||
image::{view::ImageView, ImageAccess, ImageUsage, SwapchainImage},
|
||||
impl_vertex,
|
||||
instance::{Instance, InstanceCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{
|
||||
graphics::{
|
||||
input_assembly::InputAssemblyState,
|
||||
@ -259,6 +260,8 @@ fn main() {
|
||||
.unwrap()
|
||||
};
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
// We now create a buffer that will store the shape of our triangle.
|
||||
// We use #[repr(C)] here to force rustc to not do anything funky with our data, although for this
|
||||
// particular example, it doesn't actually change the in-memory representation.
|
||||
@ -281,7 +284,7 @@ fn main() {
|
||||
},
|
||||
];
|
||||
let vertex_buffer = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -17,6 +17,7 @@ use vulkano::{
|
||||
debug::{DebugUtilsMessenger, DebugUtilsMessengerCreateInfo},
|
||||
Instance, InstanceCreateInfo, InstanceExtensions,
|
||||
},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
Version, VulkanLibrary,
|
||||
};
|
||||
|
||||
@ -105,6 +106,7 @@ pub struct VulkanoContext {
|
||||
device: Arc<Device>,
|
||||
graphics_queue: Arc<Queue>,
|
||||
compute_queue: Arc<Queue>,
|
||||
memory_allocator: Arc<StandardMemoryAllocator>,
|
||||
}
|
||||
|
||||
impl Default for VulkanoContext {
|
||||
@ -173,12 +175,15 @@ impl VulkanoContext {
|
||||
config.device_features,
|
||||
);
|
||||
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
|
||||
|
||||
Self {
|
||||
instance,
|
||||
_debug_utils_messenger,
|
||||
device,
|
||||
graphics_queue,
|
||||
compute_queue,
|
||||
memory_allocator,
|
||||
}
|
||||
}
|
||||
|
||||
@ -292,4 +297,10 @@ impl VulkanoContext {
|
||||
pub fn compute_queue(&self) -> &Arc<Queue> {
|
||||
&self.compute_queue
|
||||
}
|
||||
|
||||
/// Returns the memory allocator.
|
||||
#[inline]
|
||||
pub fn memory_allocator(&self) -> &Arc<StandardMemoryAllocator> {
|
||||
&self.memory_allocator
|
||||
}
|
||||
}
|
||||
|
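On the vulkano-util side, the context now owns the allocator and hands it to each window renderer. A hedged sketch of getting at it from user code; module paths and the default configuration are assumptions from the vulkano-util crate layout, and the buffer call only illustrates where the reference goes.

// `VulkanoContext::default()` builds instance, device and queues with the default config.
let context = vulkano_util::context::VulkanoContext::default();

// `memory_allocator()` returns `&Arc<StandardMemoryAllocator>`; clone the Arc to share it.
let memory_allocator = context.memory_allocator().clone();

// Deref the Arc wherever a `&impl MemoryAllocator` is expected:
let staging = vulkano::buffer::CpuAccessibleBuffer::from_iter(
    &*memory_allocator,
    vulkano::buffer::BufferUsage {
        transfer_src: true,
        ..vulkano::buffer::BufferUsage::empty()
    },
    false,
    0..128u32,
)
.unwrap();
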
@ -16,6 +16,7 @@ use vulkano::{
|
||||
image::{
|
||||
view::ImageView, ImageAccess, ImageUsage, ImageViewAbstract, StorageImage, SwapchainImage,
|
||||
},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
swapchain::{
|
||||
self, AcquireError, Surface, Swapchain, SwapchainCreateInfo, SwapchainCreationError,
|
||||
SwapchainPresentInfo,
|
||||
@ -46,6 +47,7 @@ pub struct VulkanoWindowRenderer {
|
||||
compute_queue: Arc<Queue>,
|
||||
swapchain: Arc<Swapchain>,
|
||||
final_views: Vec<SwapchainImageView>,
|
||||
memory_allocator: Arc<StandardMemoryAllocator>,
|
||||
/// Additional image views that you can add which are resized with the window.
|
||||
/// Use associated functions to get access to these.
|
||||
additional_image_views: HashMap<usize, DeviceImageView>,
|
||||
@ -64,6 +66,7 @@ impl VulkanoWindowRenderer {
|
||||
window: winit::window::Window,
|
||||
descriptor: &WindowDescriptor,
|
||||
swapchain_create_info_modify: fn(&mut SwapchainCreateInfo),
|
||||
memory_allocator: Arc<StandardMemoryAllocator>,
|
||||
) -> VulkanoWindowRenderer {
|
||||
// Create rendering surface from window
|
||||
let surface =
|
||||
@ -86,6 +89,7 @@ impl VulkanoWindowRenderer {
|
||||
compute_queue: vulkano_context.compute_queue().clone(),
|
||||
swapchain: swap_chain,
|
||||
final_views,
|
||||
memory_allocator,
|
||||
additional_image_views: HashMap::default(),
|
||||
recreate_swapchain: false,
|
||||
previous_frame_end,
|
||||
@ -239,6 +243,7 @@ impl VulkanoWindowRenderer {
|
||||
pub fn add_additional_image_view(&mut self, key: usize, format: Format, usage: ImageUsage) {
|
||||
let size = self.swapchain_image_size();
|
||||
let image = StorageImage::general_purpose_image_view(
|
||||
&*self.memory_allocator,
|
||||
self.graphics_queue.clone(),
|
||||
size,
|
||||
format,
|
||||
|
@ -168,6 +168,7 @@ impl VulkanoWindows {
|
||||
winit_window,
|
||||
window_descriptor,
|
||||
swapchain_create_info_modify,
|
||||
vulkano_context.memory_allocator().clone(),
|
||||
),
|
||||
);
|
||||
|
||||
|
@ -17,17 +17,18 @@
|
||||
//! or write and write simultaneously will block.
|
||||
|
||||
use super::{
|
||||
sys::UnsafeBuffer, BufferAccess, BufferAccessObject, BufferContents, BufferInner, BufferUsage,
|
||||
sys::UnsafeBuffer, BufferAccess, BufferAccessObject, BufferContents, BufferCreationError,
|
||||
BufferInner, BufferUsage,
|
||||
};
|
||||
use crate::{
|
||||
buffer::{sys::UnsafeBufferCreateInfo, BufferCreationError, TypedBufferAccess},
|
||||
buffer::{sys::UnsafeBufferCreateInfo, TypedBufferAccess},
|
||||
device::{Device, DeviceOwned},
|
||||
memory::{
|
||||
pool::{
|
||||
AllocFromRequirementsFilter, AllocLayout, MappingRequirement, MemoryPoolAlloc,
|
||||
PotentialDedicatedAllocation, StandardMemoryPoolAlloc,
|
||||
allocator::{
|
||||
AllocationCreateInfo, AllocationCreationError, AllocationType, MemoryAlloc,
|
||||
MemoryAllocatePreference, MemoryAllocator, MemoryUsage,
|
||||
},
|
||||
DedicatedAllocation, DeviceMemoryError, MemoryPool,
|
||||
DedicatedAllocation,
|
||||
},
|
||||
sync::Sharing,
|
||||
DeviceSize,
|
||||
@ -51,7 +52,7 @@ use std::{
|
||||
/// memory caches GPU data on the CPU side. This can be more performant in cases where
|
||||
/// the CPU needs to read data coming off the GPU.
|
||||
#[derive(Debug)]
|
||||
pub struct CpuAccessibleBuffer<T, A = PotentialDedicatedAllocation<StandardMemoryPoolAlloc>>
|
||||
pub struct CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
{
|
||||
@ -59,7 +60,7 @@ where
|
||||
inner: Arc<UnsafeBuffer>,
|
||||
|
||||
// The memory held by the buffer.
|
||||
memory: A,
|
||||
memory: MemoryAlloc,
|
||||
|
||||
// Queue families allowed to access this buffer.
|
||||
queue_family_indices: SmallVec<[u32; 4]>,
|
||||
@ -77,17 +78,15 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub fn from_data(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
usage: BufferUsage,
|
||||
host_cached: bool,
|
||||
data: T,
|
||||
) -> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryError> {
|
||||
) -> Result<Arc<CpuAccessibleBuffer<T>>, AllocationCreationError> {
|
||||
unsafe {
|
||||
let uninitialized = CpuAccessibleBuffer::raw(
|
||||
device,
|
||||
allocator,
|
||||
size_of::<T>() as DeviceSize,
|
||||
usage,
|
||||
host_cached,
|
||||
@ -113,11 +112,17 @@ where
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
pub unsafe fn uninitialized(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
usage: BufferUsage,
|
||||
host_cached: bool,
|
||||
) -> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryError> {
|
||||
CpuAccessibleBuffer::raw(device, size_of::<T>() as DeviceSize, usage, host_cached, [])
|
||||
) -> Result<Arc<CpuAccessibleBuffer<T>>, AllocationCreationError> {
|
||||
CpuAccessibleBuffer::raw(
|
||||
allocator,
|
||||
size_of::<T>() as DeviceSize,
|
||||
usage,
|
||||
host_cached,
|
||||
[],
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -132,14 +137,12 @@ where
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `data` is empty.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub fn from_iter<I>(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
usage: BufferUsage,
|
||||
host_cached: bool,
|
||||
data: I,
|
||||
) -> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryError>
|
||||
) -> Result<Arc<CpuAccessibleBuffer<[T]>>, AllocationCreationError>
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
I::IntoIter: ExactSizeIterator,
|
||||
@ -148,7 +151,7 @@ where
|
||||
|
||||
unsafe {
|
||||
let uninitialized = CpuAccessibleBuffer::uninitialized_array(
|
||||
device,
|
||||
allocator,
|
||||
data.len() as DeviceSize,
|
||||
usage,
|
||||
host_cached,
|
||||
@ -176,16 +179,14 @@ where
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `len` is zero.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub unsafe fn uninitialized_array(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
len: DeviceSize,
|
||||
usage: BufferUsage,
|
||||
host_cached: bool,
|
||||
) -> Result<Arc<CpuAccessibleBuffer<[T]>>, DeviceMemoryError> {
|
||||
) -> Result<Arc<CpuAccessibleBuffer<[T]>>, AllocationCreationError> {
|
||||
CpuAccessibleBuffer::raw(
|
||||
device,
|
||||
allocator,
|
||||
len * size_of::<T>() as DeviceSize,
|
||||
usage,
|
||||
host_cached,
|
||||
@ -207,75 +208,70 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `size` is zero.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub unsafe fn raw(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
size: DeviceSize,
|
||||
usage: BufferUsage,
|
||||
host_cached: bool,
|
||||
queue_family_indices: impl IntoIterator<Item = u32>,
|
||||
) -> Result<Arc<CpuAccessibleBuffer<T>>, DeviceMemoryError> {
|
||||
) -> Result<Arc<CpuAccessibleBuffer<T>>, AllocationCreationError> {
|
||||
let queue_family_indices: SmallVec<[_; 4]> = queue_family_indices.into_iter().collect();
|
||||
|
||||
let buffer = {
|
||||
match UnsafeBuffer::new(
|
||||
device.clone(),
|
||||
UnsafeBufferCreateInfo {
|
||||
sharing: if queue_family_indices.len() >= 2 {
|
||||
Sharing::Concurrent(queue_family_indices.clone())
|
||||
} else {
|
||||
Sharing::Exclusive
|
||||
},
|
||||
size,
|
||||
usage,
|
||||
..Default::default()
|
||||
},
|
||||
) {
|
||||
Ok(b) => b,
|
||||
Err(BufferCreationError::AllocError(err)) => return Err(err),
|
||||
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
|
||||
// errors can't happen
|
||||
}
|
||||
};
|
||||
let mem_reqs = buffer.memory_requirements();
|
||||
|
||||
let memory = MemoryPool::alloc_from_requirements(
|
||||
&device.standard_memory_pool(),
|
||||
&mem_reqs,
|
||||
AllocLayout::Linear,
|
||||
MappingRequirement::Map,
|
||||
Some(DedicatedAllocation::Buffer(&buffer)),
|
||||
|m| {
|
||||
if m.property_flags.host_cached {
|
||||
if host_cached {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
let buffer = UnsafeBuffer::new(
|
||||
allocator.device().clone(),
|
||||
UnsafeBufferCreateInfo {
|
||||
sharing: if queue_family_indices.len() >= 2 {
|
||||
Sharing::Concurrent(queue_family_indices.clone())
|
||||
} else {
|
||||
if host_cached {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
}
|
||||
}
|
||||
Sharing::Exclusive
|
||||
},
|
||||
size,
|
||||
usage,
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
|
||||
debug_assert!(memory.mapped_memory().is_some());
|
||||
buffer.bind_memory(memory.memory(), memory.offset())?;
|
||||
)
|
||||
.map_err(|err| match err {
|
||||
BufferCreationError::AllocError(err) => err,
|
||||
// We don't use sparse-binding, therefore the other errors can't happen.
|
||||
_ => unreachable!(),
|
||||
})?;
|
||||
let requirements = buffer.memory_requirements();
|
||||
let create_info = AllocationCreateInfo {
|
||||
requirements,
|
||||
allocation_type: AllocationType::Linear,
|
||||
usage: if host_cached {
|
||||
MemoryUsage::Download
|
||||
} else {
|
||||
MemoryUsage::Upload
|
||||
},
|
||||
allocate_preference: MemoryAllocatePreference::Unknown,
|
||||
dedicated_allocation: Some(DedicatedAllocation::Buffer(&buffer)),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
Ok(Arc::new(CpuAccessibleBuffer {
|
||||
inner: buffer,
|
||||
memory,
|
||||
queue_family_indices,
|
||||
marker: PhantomData,
|
||||
}))
|
||||
match allocator.allocate_unchecked(create_info) {
|
||||
Ok(mut alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
// The implementation might require a larger size than we wanted. With this it is
|
||||
// easier to invalidate and flush the whole buffer. It does not affect the
|
||||
// allocation in any way.
|
||||
alloc.shrink(size);
|
||||
buffer.bind_memory(alloc.device_memory(), alloc.offset())?;
|
||||
|
||||
Ok(Arc::new(CpuAccessibleBuffer {
|
||||
inner: buffer,
|
||||
memory: alloc,
|
||||
queue_family_indices,
|
||||
marker: PhantomData,
|
||||
}))
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> CpuAccessibleBuffer<T, A>
|
||||
impl<T> CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
{
|
||||
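The hunks above swap the device-plus-pool parameters for a single allocator reference and change the error type from DeviceMemoryError to AllocationCreationError. A caller-side sketch under the new signature; the allocator creation and the match arms are illustrative, and the variant name is taken from a later hunk in this commit.

use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer};
use vulkano::memory::allocator::{AllocationCreationError, StandardMemoryAllocator};

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());

match CpuAccessibleBuffer::from_data(
    &memory_allocator, // was: device.clone()
    BufferUsage {
        uniform_buffer: true,
        ..BufferUsage::empty()
    },
    false, // host_cached
    12u32, // any `T: BufferContents` with non-zero size
) {
    Ok(buffer) => drop(buffer),
    Err(AllocationCreationError::OutOfDeviceMemory) => { /* free something and retry */ }
    Err(err) => panic!("allocation failed: {err}"),
}
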
@ -285,10 +281,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> CpuAccessibleBuffer<T, A>
|
||||
impl<T> CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
/// Locks the buffer in order to read its content from the CPU.
|
||||
///
|
||||
@ -299,7 +294,7 @@ where
|
||||
/// After this function successfully locks the buffer, any attempt to submit a command buffer
|
||||
/// that uses it in exclusive mode will fail. You can still submit this buffer for non-exclusive
|
||||
/// accesses (ie. reads).
|
||||
pub fn read(&self) -> Result<ReadLock<'_, T, A>, ReadLockError> {
|
||||
pub fn read(&self) -> Result<ReadLock<'_, T>, ReadLockError> {
|
||||
let mut state = self.inner.state();
|
||||
let buffer_range = self.inner().offset..self.inner().offset + self.size();
|
||||
|
||||
@ -308,20 +303,14 @@ where
|
||||
state.cpu_read_lock(buffer_range.clone());
|
||||
}
|
||||
|
||||
let mapped_memory = self.memory.mapped_memory().unwrap();
|
||||
let offset = self.memory.offset();
|
||||
let memory_range = offset..offset + self.inner.size();
|
||||
|
||||
let bytes = unsafe {
|
||||
// If there are other read locks being held at this point, they also called
|
||||
// `invalidate_range` when locking. The GPU can't write data while the CPU holds a read
|
||||
// lock, so there will be no new data and this call will do nothing.
|
||||
// TODO: probably still more efficient to call it only if we're the first to acquire a
|
||||
// read lock, but the number of CPU locks isn't currently tracked anywhere.
|
||||
mapped_memory
|
||||
.invalidate_range(memory_range.clone())
|
||||
.unwrap();
|
||||
mapped_memory.read(memory_range).unwrap()
|
||||
self.memory.invalidate_range(0..self.size()).unwrap();
|
||||
self.memory.mapped_slice().unwrap()
|
||||
};
|
||||
|
||||
Ok(ReadLock {
|
||||
@ -339,7 +328,7 @@ where
|
||||
///
|
||||
/// After this function successfully locks the buffer, any attempt to submit a command buffer
|
||||
/// that uses it and any attempt to call `read()` will return an error.
|
||||
pub fn write(&self) -> Result<WriteLock<'_, T, A>, WriteLockError> {
|
||||
pub fn write(&self) -> Result<WriteLock<'_, T>, WriteLockError> {
|
||||
let mut state = self.inner.state();
|
||||
let buffer_range = self.inner().offset..self.inner().offset + self.size();
|
||||
|
||||
@ -348,30 +337,22 @@ where
|
||||
state.cpu_write_lock(buffer_range.clone());
|
||||
}
|
||||
|
||||
let mapped_memory = self.memory.mapped_memory().unwrap();
|
||||
let offset = self.memory.offset();
|
||||
let memory_range = offset..offset + self.size();
|
||||
|
||||
let bytes = unsafe {
|
||||
mapped_memory
|
||||
.invalidate_range(memory_range.clone())
|
||||
.unwrap();
|
||||
mapped_memory.write(memory_range.clone()).unwrap()
|
||||
self.memory.invalidate_range(0..self.size()).unwrap();
|
||||
self.memory.write(0..self.size()).unwrap()
|
||||
};
|
||||
|
||||
Ok(WriteLock {
|
||||
inner: self,
|
||||
buffer_range,
|
||||
memory_range,
|
||||
data: T::from_bytes_mut(bytes).unwrap(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
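From the caller's point of view read() and write() are unchanged; only the plumbing moved from MappedDeviceMemory onto MemoryAlloc, which now handles the invalidate on lock and the flush when the write guard drops. A short usage sketch, assuming a host-visible CpuAccessibleBuffer<[u32]> named `buffer` like the ones created above.

// Shared CPU read: invalidates the mapped range, then derefs to `&[u32]`.
{
    let content = buffer.read().unwrap();
    assert!(!content.is_empty());
}

// Exclusive CPU write: derefs to `&mut [u32]`; the whole range is flushed on drop.
{
    let mut content = buffer.write().unwrap();
    content[0] = 42;
} // <- WriteLock::drop flushes and releases the lock
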
unsafe impl<T, A> BufferAccess for CpuAccessibleBuffer<T, A>
|
||||
unsafe impl<T> BufferAccess for CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
fn inner(&self) -> BufferInner<'_> {
|
||||
BufferInner {
|
||||
@ -385,25 +366,23 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> BufferAccessObject for Arc<CpuAccessibleBuffer<T, A>>
|
||||
impl<T> BufferAccessObject for Arc<CpuAccessibleBuffer<T>>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync + 'static,
|
||||
{
|
||||
fn as_buffer_access_object(&self) -> Arc<dyn BufferAccess> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> TypedBufferAccess for CpuAccessibleBuffer<T, A>
|
||||
unsafe impl<T> TypedBufferAccess for CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
type Content = T;
|
||||
}
|
||||
|
||||
unsafe impl<T, A> DeviceOwned for CpuAccessibleBuffer<T, A>
|
||||
unsafe impl<T> DeviceOwned for CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
{
|
||||
@ -412,27 +391,20 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> PartialEq for CpuAccessibleBuffer<T, A>
|
||||
impl<T> PartialEq for CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner() && self.size() == other.size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Eq for CpuAccessibleBuffer<T, A>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
}
|
||||
impl<T> Eq for CpuAccessibleBuffer<T> where T: BufferContents + ?Sized {}
|
||||
|
||||
impl<T, A> Hash for CpuAccessibleBuffer<T, A>
|
||||
impl<T> Hash for CpuAccessibleBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
@ -445,20 +417,18 @@ where
|
||||
/// Note that this object holds a rwlock read guard on the chunk. If another thread tries to access
|
||||
/// this buffer's content or tries to submit a GPU command that uses this buffer, it will block.
|
||||
#[derive(Debug)]
|
||||
pub struct ReadLock<'a, T, A>
|
||||
pub struct ReadLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
inner: &'a CpuAccessibleBuffer<T, A>,
|
||||
inner: &'a CpuAccessibleBuffer<T>,
|
||||
buffer_range: Range<DeviceSize>,
|
||||
data: &'a T,
|
||||
}
|
||||
|
||||
impl<'a, T, A> Drop for ReadLock<'a, T, A>
|
||||
impl<'a, T> Drop for ReadLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized + 'a,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
@ -468,10 +438,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, A> Deref for ReadLock<'a, T, A>
|
||||
impl<'a, T> Deref for ReadLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized + 'a,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
type Target = T;
|
||||
|
||||
@ -485,30 +454,22 @@ where
|
||||
/// Note that this object holds a rwlock write guard on the chunk. If another thread tries to access
|
||||
/// this buffer's content or tries to submit a GPU command that uses this buffer, it will block.
|
||||
#[derive(Debug)]
|
||||
pub struct WriteLock<'a, T, A>
|
||||
pub struct WriteLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
inner: &'a CpuAccessibleBuffer<T, A>,
|
||||
inner: &'a CpuAccessibleBuffer<T>,
|
||||
buffer_range: Range<DeviceSize>,
|
||||
memory_range: Range<DeviceSize>,
|
||||
data: &'a mut T,
|
||||
}
|
||||
|
||||
impl<'a, T, A> Drop for WriteLock<'a, T, A>
|
||||
impl<'a, T> Drop for WriteLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized + 'a,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
self.inner
|
||||
.memory
|
||||
.mapped_memory()
|
||||
.unwrap()
|
||||
.flush_range(self.memory_range.clone())
|
||||
.unwrap();
|
||||
self.inner.memory.flush_range(0..self.inner.size()).unwrap();
|
||||
|
||||
let mut state = self.inner.inner.state();
|
||||
state.cpu_write_unlock(self.buffer_range.clone());
|
||||
@ -516,10 +477,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, A> Deref for WriteLock<'a, T, A>
|
||||
impl<'a, T> Deref for WriteLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized + 'a,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
type Target = T;
|
||||
|
||||
@ -528,10 +488,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T, A> DerefMut for WriteLock<'a, T, A>
|
||||
impl<'a, T> DerefMut for WriteLock<'a, T>
|
||||
where
|
||||
T: BufferContents + ?Sized + 'a,
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
self.data
|
||||
@ -592,17 +551,19 @@ impl Display for WriteLockError {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::buffer::{BufferUsage, CpuAccessibleBuffer};
|
||||
use super::*;
|
||||
use crate::memory::allocator::StandardMemoryAllocator;
|
||||
|
||||
#[test]
|
||||
fn create_empty_buffer() {
|
||||
let (device, _queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
const EMPTY: [i32; 0] = [];
|
||||
|
||||
assert_should_panic!({
|
||||
CpuAccessibleBuffer::from_data(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
@ -612,7 +573,7 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
CpuAccessibleBuffer::from_iter(
|
||||
device,
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -15,13 +15,13 @@ use super::{
|
||||
use crate::{
|
||||
device::{Device, DeviceOwned},
|
||||
memory::{
|
||||
pool::{
|
||||
AllocFromRequirementsFilter, AllocLayout, MappingRequirement, MemoryPoolAlloc,
|
||||
PotentialDedicatedAllocation, StandardMemoryPool,
|
||||
allocator::{
|
||||
AllocationCreateInfo, AllocationCreationError, AllocationType, MemoryAlloc,
|
||||
MemoryAllocatePreference, MemoryAllocator, MemoryUsage, StandardMemoryAllocator,
|
||||
},
|
||||
DedicatedAllocation, DeviceMemoryError, MemoryPool,
|
||||
DedicatedAllocation,
|
||||
},
|
||||
DeviceSize, OomError,
|
||||
DeviceSize,
|
||||
};
|
||||
use std::{
|
||||
hash::{Hash, Hasher},
|
||||
@ -65,12 +65,12 @@ use std::{
|
||||
/// use vulkano::command_buffer::CommandBufferUsage;
|
||||
/// use vulkano::command_buffer::PrimaryCommandBufferAbstract;
|
||||
/// use vulkano::sync::GpuFuture;
|
||||
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
|
||||
/// # let queue: std::sync::Arc<vulkano::device::Queue> = return;
|
||||
/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
|
||||
/// # let command_buffer_allocator: vulkano::command_buffer::allocator::StandardCommandBufferAllocator = return;
|
||||
///
|
||||
/// // Create the ring buffer.
|
||||
/// let buffer = CpuBufferPool::upload(device.clone());
|
||||
/// let buffer = CpuBufferPool::upload(memory_allocator);
|
||||
///
|
||||
/// for n in 0 .. 25u32 {
|
||||
/// // Each loop grabs a new entry from that ring buffer and stores `data` in it.
|
||||
@ -95,22 +95,21 @@ use std::{
|
||||
/// .unwrap();
|
||||
/// }
|
||||
/// ```
|
||||
pub struct CpuBufferPool<T, A = Arc<StandardMemoryPool>>
|
||||
pub struct CpuBufferPool<T, A = StandardMemoryAllocator>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
A: MemoryAllocator + ?Sized,
|
||||
{
|
||||
// The device of the pool.
|
||||
device: Arc<Device>,
|
||||
|
||||
// The memory pool to use for allocations.
|
||||
pool: A,
|
||||
allocator: Arc<A>,
|
||||
|
||||
// Current buffer from which elements are grabbed.
|
||||
current_buffer: Mutex<Option<Arc<ActualBuffer<A>>>>,
|
||||
current_buffer: Mutex<Option<Arc<ActualBuffer>>>,
|
||||
|
||||
// Buffer usage.
|
||||
usage: BufferUsage,
|
||||
buffer_usage: BufferUsage,
|
||||
|
||||
memory_usage: MemoryUsage,
|
||||
|
||||
// Necessary to make it compile.
|
||||
marker: PhantomData<Box<T>>,
|
||||
@ -118,15 +117,12 @@ where
|
||||
|
||||
// One buffer of the pool.
|
||||
#[derive(Debug)]
|
||||
struct ActualBuffer<A>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
struct ActualBuffer {
|
||||
// Inner content.
|
||||
inner: Arc<UnsafeBuffer>,
|
||||
|
||||
// The memory held by the buffer.
|
||||
memory: PotentialDedicatedAllocation<A::Alloc>,
|
||||
memory: MemoryAlloc,
|
||||
|
||||
// List of the chunks that are reserved.
|
||||
chunks_in_use: Mutex<Vec<ActualBufferChunk>>,
|
||||
@ -154,12 +150,11 @@ struct ActualBufferChunk {
|
||||
/// A subbuffer allocated from a `CpuBufferPool`.
|
||||
///
|
||||
/// When this object is destroyed, the subbuffer is automatically reclaimed by the pool.
|
||||
pub struct CpuBufferPoolChunk<T, A>
|
||||
pub struct CpuBufferPoolChunk<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
buffer: Arc<ActualBuffer<A>>,
|
||||
buffer: Arc<ActualBuffer>,
|
||||
|
||||
// Index of the subbuffer within `buffer`. In number of elements.
|
||||
index: DeviceSize,
|
||||
@ -179,37 +174,38 @@ where
|
||||
/// A subbuffer allocated from a `CpuBufferPool`.
|
||||
///
|
||||
/// When this object is destroyed, the subbuffer is automatically reclaimed by the pool.
|
||||
pub struct CpuBufferPoolSubbuffer<T, A>
|
||||
pub struct CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
// This struct is just a wrapper around `CpuBufferPoolChunk`.
|
||||
chunk: CpuBufferPoolChunk<T, A>,
|
||||
chunk: CpuBufferPoolChunk<T>,
|
||||
}
|
||||
|
||||
impl<T> CpuBufferPool<T>
|
||||
impl<T, A> CpuBufferPool<T, A>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryAllocator + ?Sized,
|
||||
{
|
||||
/// Builds a `CpuBufferPool`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
#[inline]
|
||||
pub fn new(device: Arc<Device>, usage: BufferUsage) -> CpuBufferPool<T> {
|
||||
/// - Panics if `memory_usage` is [`MemoryUsage::GpuOnly`].
|
||||
pub fn new(
|
||||
allocator: Arc<A>,
|
||||
buffer_usage: BufferUsage,
|
||||
memory_usage: MemoryUsage,
|
||||
) -> CpuBufferPool<T, A> {
|
||||
assert!(size_of::<T>() > 0);
|
||||
assert!(!usage.shader_device_address);
|
||||
let pool = device.standard_memory_pool();
|
||||
assert!(memory_usage != MemoryUsage::GpuOnly);
|
||||
|
||||
CpuBufferPool {
|
||||
device,
|
||||
pool,
|
||||
allocator,
|
||||
current_buffer: Mutex::new(None),
|
||||
usage,
|
||||
buffer_usage,
|
||||
memory_usage,
|
||||
marker: PhantomData,
|
||||
}
|
||||
}
|
||||
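CpuBufferPool::new now takes an Arc'd allocator, the buffer usage, and an explicit MemoryUsage (anything but GpuOnly); the upload/download/uniform_buffer/vertex_buffer/indirect_buffer constructors just pick those for you. A ring-buffer sketch in the spirit of the updated doc example, assuming `device: Arc<Device>` is already created.

use std::sync::Arc;
use vulkano::buffer::CpuBufferPool;
use vulkano::memory::allocator::StandardMemoryAllocator;

let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));

// `upload` == transfer_src usage + MemoryUsage::Upload; `new` spells both out.
let pool: CpuBufferPool<u32> = CpuBufferPool::upload(memory_allocator.clone());

for n in 0..25u32 {
    // Each iteration grabs a fresh subbuffer from the pool's ring buffer.
    let _subbuffer = pool.from_data(n).unwrap();
}
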
@ -222,14 +218,14 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
#[inline]
|
||||
pub fn upload(device: Arc<Device>) -> CpuBufferPool<T> {
|
||||
pub fn upload(allocator: Arc<A>) -> CpuBufferPool<T, A> {
|
||||
CpuBufferPool::new(
|
||||
device,
|
||||
allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
)
|
||||
}
|
||||
|
||||
@ -241,14 +237,14 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
#[inline]
|
||||
pub fn download(device: Arc<Device>) -> CpuBufferPool<T> {
|
||||
pub fn download(allocator: Arc<A>) -> CpuBufferPool<T, A> {
|
||||
CpuBufferPool::new(
|
||||
device,
|
||||
allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Download,
|
||||
)
|
||||
}
|
||||
|
||||
@ -260,14 +256,14 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
#[inline]
|
||||
pub fn uniform_buffer(device: Arc<Device>) -> CpuBufferPool<T> {
|
||||
pub fn uniform_buffer(allocator: Arc<A>) -> CpuBufferPool<T, A> {
|
||||
CpuBufferPool::new(
|
||||
device,
|
||||
allocator,
|
||||
BufferUsage {
|
||||
uniform_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
)
|
||||
}
|
||||
|
||||
@ -279,14 +275,14 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
#[inline]
|
||||
pub fn vertex_buffer(device: Arc<Device>) -> CpuBufferPool<T> {
|
||||
pub fn vertex_buffer(allocator: Arc<A>) -> CpuBufferPool<T, A> {
|
||||
CpuBufferPool::new(
|
||||
device,
|
||||
allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
)
|
||||
}
|
||||
|
||||
@ -298,14 +294,14 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
#[inline]
|
||||
pub fn indirect_buffer(device: Arc<Device>) -> CpuBufferPool<T> {
|
||||
pub fn indirect_buffer(allocator: Arc<A>) -> CpuBufferPool<T, A> {
|
||||
CpuBufferPool::new(
|
||||
device,
|
||||
allocator,
|
||||
BufferUsage {
|
||||
indirect_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
MemoryUsage::Upload,
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -313,7 +309,7 @@ where
|
||||
impl<T, A> CpuBufferPool<T, A>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
A: MemoryAllocator + ?Sized,
|
||||
{
|
||||
/// Returns the current capacity of the pool, in number of elements.
|
||||
pub fn capacity(&self) -> DeviceSize {
|
||||
@ -327,7 +323,7 @@ where
|
||||
/// case.
|
||||
///
|
||||
/// Since this can involve a memory allocation, an `OomError` can happen.
|
||||
pub fn reserve(&self, capacity: DeviceSize) -> Result<(), DeviceMemoryError> {
|
||||
pub fn reserve(&self, capacity: DeviceSize) -> Result<(), AllocationCreationError> {
|
||||
if capacity == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
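reserve() keeps its shape and now reports AllocationCreationError like everything else; a one-line continuation of the pool sketch above.

// Pre-allocate room for 1024 elements so the first frames don't hit the allocator.
pool.reserve(1024).expect("failed to reserve CpuBufferPool capacity");
assert!(pool.capacity() >= 1024);
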
@ -352,11 +348,10 @@ where
|
||||
///
|
||||
/// > **Note**: You can think of it like a `Vec`. If you insert an element and the `Vec` is not
|
||||
/// > large enough, a new chunk of memory is automatically allocated.
|
||||
#[inline]
|
||||
pub fn from_data(
|
||||
&self,
|
||||
data: T,
|
||||
) -> Result<Arc<CpuBufferPoolSubbuffer<T, A>>, DeviceMemoryError> {
|
||||
) -> Result<Arc<CpuBufferPoolSubbuffer<T>>, AllocationCreationError> {
|
||||
Ok(Arc::new(CpuBufferPoolSubbuffer {
|
||||
chunk: self.chunk_impl([data].into_iter())?,
|
||||
}))
|
||||
@ -373,9 +368,10 @@ where
|
||||
/// # Panic
|
||||
///
|
||||
/// Panics if the length of the iterator didn't match the actual number of elements.
|
||||
///
|
||||
#[inline]
|
||||
pub fn from_iter<I>(&self, iter: I) -> Result<Arc<CpuBufferPoolChunk<T, A>>, DeviceMemoryError>
|
||||
pub fn from_iter<I>(
|
||||
&self,
|
||||
iter: I,
|
||||
) -> Result<Arc<CpuBufferPoolChunk<T>>, AllocationCreationError>
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
I::IntoIter: ExactSizeIterator,
|
||||
@ -386,7 +382,7 @@ where
|
||||
fn chunk_impl(
|
||||
&self,
|
||||
data: impl ExactSizeIterator<Item = T>,
|
||||
) -> Result<CpuBufferPoolChunk<T, A>, DeviceMemoryError> {
|
||||
) -> Result<CpuBufferPoolChunk<T>, AllocationCreationError> {
|
||||
let mut mutex = self.current_buffer.lock().unwrap();
|
||||
|
||||
let data = match self.try_next_impl(&mut mutex, data) {
|
||||
@ -413,8 +409,7 @@ where
|
||||
///
|
||||
/// A `CpuBufferPool` is always empty the first time you use it, so you shouldn't use
|
||||
/// `try_next` the first time you use it.
|
||||
#[inline]
|
||||
pub fn try_next(&self, data: T) -> Option<Arc<CpuBufferPoolSubbuffer<T, A>>> {
|
||||
pub fn try_next(&self, data: T) -> Option<Arc<CpuBufferPoolSubbuffer<T>>> {
|
||||
let mut mutex = self.current_buffer.lock().unwrap();
|
||||
self.try_next_impl(&mut mutex, [data])
|
||||
.map(|c| Arc::new(CpuBufferPoolSubbuffer { chunk: c }))
|
||||
@ -426,50 +421,55 @@ where
|
||||
// `cur_buf_mutex` must be an active lock of `self.current_buffer`.
|
||||
fn reset_buf(
|
||||
&self,
|
||||
cur_buf_mutex: &mut MutexGuard<'_, Option<Arc<ActualBuffer<A>>>>,
|
||||
cur_buf_mutex: &mut MutexGuard<'_, Option<Arc<ActualBuffer>>>,
|
||||
capacity: DeviceSize,
|
||||
) -> Result<(), DeviceMemoryError> {
|
||||
) -> Result<(), AllocationCreationError> {
|
||||
let size = match (size_of::<T>() as DeviceSize).checked_mul(capacity) {
|
||||
Some(s) => s,
|
||||
None => return Err(DeviceMemoryError::OomError(OomError::OutOfDeviceMemory)),
|
||||
None => return Err(AllocationCreationError::OutOfDeviceMemory),
|
||||
};
|
||||
let buffer = match UnsafeBuffer::new(
|
||||
self.device.clone(),
|
||||
|
||||
let buffer = UnsafeBuffer::new(
|
||||
self.device().clone(),
|
||||
UnsafeBufferCreateInfo {
|
||||
size,
|
||||
usage: self.usage,
|
||||
usage: self.buffer_usage,
|
||||
..Default::default()
|
||||
},
|
||||
) {
|
||||
Ok(b) => b,
|
||||
Err(BufferCreationError::AllocError(err)) => return Err(err),
|
||||
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
|
||||
// errors can't happen
|
||||
)
|
||||
.map_err(|err| match err {
|
||||
BufferCreationError::AllocError(err) => err,
|
||||
// We don't use sparse-binding, therefore the other errors can't happen.
|
||||
_ => unreachable!(),
|
||||
})?;
|
||||
let requirements = buffer.memory_requirements();
|
||||
let create_info = AllocationCreateInfo {
|
||||
requirements,
|
||||
allocation_type: AllocationType::Linear,
|
||||
usage: self.memory_usage,
|
||||
allocate_preference: MemoryAllocatePreference::Unknown,
|
||||
dedicated_allocation: Some(DedicatedAllocation::Buffer(&buffer)),
|
||||
..Default::default()
|
||||
};
|
||||
let mem_reqs = buffer.memory_requirements();
|
||||
|
||||
unsafe {
|
||||
let mem = MemoryPool::alloc_from_requirements(
|
||||
&self.pool,
|
||||
&mem_reqs,
|
||||
AllocLayout::Linear,
|
||||
MappingRequirement::Map,
|
||||
Some(DedicatedAllocation::Buffer(&buffer)),
|
||||
|_| AllocFromRequirementsFilter::Allowed,
|
||||
)?;
|
||||
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
|
||||
debug_assert!(mem.mapped_memory().is_some());
|
||||
buffer.bind_memory(mem.memory(), mem.offset())?;
|
||||
match unsafe { self.allocator.allocate_unchecked(create_info) } {
|
||||
Ok(mut alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
alloc.shrink(size);
|
||||
unsafe { buffer.bind_memory(alloc.device_memory(), alloc.offset()) }?;
|
||||
|
||||
**cur_buf_mutex = Some(Arc::new(ActualBuffer {
|
||||
inner: buffer,
|
||||
memory: mem,
|
||||
chunks_in_use: Mutex::new(vec![]),
|
||||
next_index: AtomicU64::new(0),
|
||||
capacity,
|
||||
}));
|
||||
**cur_buf_mutex = Some(Arc::new(ActualBuffer {
|
||||
inner: buffer,
|
||||
memory: alloc,
|
||||
chunks_in_use: Mutex::new(vec![]),
|
||||
next_index: AtomicU64::new(0),
|
||||
capacity,
|
||||
}));
|
||||
|
||||
Ok(())
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
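reset_buf above and CpuAccessibleBuffer::raw earlier now follow the same sequence: build the UnsafeBuffer, turn its MemoryRequirements into an AllocationCreateInfo, allocate through the MemoryAllocator, then bind. A condensed, hedged restatement of that flow (types from vulkano::memory::allocator; error handling trimmed; not the exact code of either hunk).

// Assumes `allocator: &impl MemoryAllocator` and `buffer: Arc<UnsafeBuffer>` already exist.
let requirements = buffer.memory_requirements();
let create_info = AllocationCreateInfo {
    requirements,
    allocation_type: AllocationType::Linear, // plain buffers are linear resources
    usage: MemoryUsage::Upload,              // or Download / GpuOnly, depending on access
    allocate_preference: MemoryAllocatePreference::Unknown,
    dedicated_allocation: Some(DedicatedAllocation::Buffer(&buffer)),
    ..Default::default()
};

let mut alloc = unsafe { allocator.allocate_unchecked(create_info) }.unwrap();
// The allocator may round the size up; shrinking back keeps whole-buffer
// invalidate/flush ranges in sync with the buffer's own size.
alloc.shrink(buffer.size());
unsafe { buffer.bind_memory(alloc.device_memory(), alloc.offset()) }.unwrap();
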
@ -482,12 +482,11 @@ where
|
||||
// # Panic
|
||||
//
|
||||
// Panics if the length of the iterator didn't match the actual number of element.
|
||||
//
|
||||
fn try_next_impl<I>(
|
||||
&self,
|
||||
cur_buf_mutex: &mut MutexGuard<'_, Option<Arc<ActualBuffer<A>>>>,
|
||||
cur_buf_mutex: &mut MutexGuard<'_, Option<Arc<ActualBuffer>>>,
|
||||
data: I,
|
||||
) -> Result<CpuBufferPoolChunk<T, A>, I::IntoIter>
|
||||
) -> Result<CpuBufferPoolChunk<T>, I::IntoIter>
|
||||
where
|
||||
I: IntoIterator<Item = T>,
|
||||
I::IntoIter: ExactSizeIterator,
|
||||
@ -533,7 +532,7 @@ where
|
||||
let idx = current_buffer.next_index.load(Ordering::SeqCst);
|
||||
|
||||
// Find the required alignment in bytes.
|
||||
let align_uniform = if self.usage.uniform_buffer {
|
||||
let align_uniform = if self.buffer_usage.uniform_buffer {
|
||||
self.device()
|
||||
.physical_device()
|
||||
.properties()
|
||||
@ -541,7 +540,7 @@ where
|
||||
} else {
|
||||
1
|
||||
};
|
||||
let align_storage = if self.usage.storage_buffer {
|
||||
let align_storage = if self.buffer_usage.storage_buffer {
|
||||
self.device()
|
||||
.physical_device()
|
||||
.properties()
|
||||
@ -586,12 +585,10 @@ where
|
||||
|
||||
// Write `data` in the memory.
|
||||
unsafe {
|
||||
let mem_off = current_buffer.memory.offset();
|
||||
let range = (index * size_of::<T>() as DeviceSize + align_offset + mem_off)
|
||||
..((index + requested_len) * size_of::<T>() as DeviceSize + align_offset + mem_off);
|
||||
let range = (index * size_of::<T>() as DeviceSize + align_offset)
|
||||
..((index + requested_len) * size_of::<T>() as DeviceSize + align_offset);
|
||||
|
||||
let mapped_memory = current_buffer.memory.mapped_memory().unwrap();
|
||||
let bytes = mapped_memory.write(range.clone()).unwrap();
|
||||
let bytes = current_buffer.memory.write(range.clone()).unwrap();
|
||||
let mapping = <[T]>::from_bytes_mut(bytes).unwrap();
|
||||
|
||||
let mut written = 0;
|
||||
@ -600,12 +597,12 @@ where
|
||||
written += 1;
|
||||
}
|
||||
|
||||
mapped_memory.flush_range(range).unwrap();
|
||||
current_buffer.memory.flush_range(range).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
written, requested_len,
|
||||
"Iterator passed to CpuBufferPool::chunk has a mismatch between reported \
|
||||
length and actual number of elements"
|
||||
length and actual number of elements"
|
||||
);
|
||||
}
|
||||
|
||||
@ -634,16 +631,16 @@ where
|
||||
impl<T, A> Clone for CpuBufferPool<T, A>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool + Clone,
|
||||
A: MemoryAllocator + ?Sized,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
let buf = self.current_buffer.lock().unwrap();
|
||||
|
||||
CpuBufferPool {
|
||||
device: self.device.clone(),
|
||||
pool: self.pool.clone(),
|
||||
allocator: self.allocator.clone(),
|
||||
current_buffer: Mutex::new(buf.clone()),
|
||||
usage: self.usage,
|
||||
buffer_usage: self.buffer_usage,
|
||||
memory_usage: self.memory_usage,
|
||||
marker: PhantomData,
|
||||
}
|
||||
}
|
||||
@ -652,20 +649,18 @@ where
|
||||
unsafe impl<T, A> DeviceOwned for CpuBufferPool<T, A>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
A: MemoryAllocator + ?Sized,
|
||||
{
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
&self.device
|
||||
self.allocator.device()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Clone for CpuBufferPoolChunk<T, A>
|
||||
impl<T> Clone for CpuBufferPoolChunk<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
fn clone(&self) -> CpuBufferPoolChunk<T, A> {
|
||||
fn clone(&self) -> CpuBufferPoolChunk<T> {
|
||||
let mut chunks_in_use_lock = self.buffer.chunks_in_use.lock().unwrap();
|
||||
let chunk = chunks_in_use_lock
|
||||
.iter_mut()
|
||||
@ -688,13 +683,11 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
|
||||
unsafe impl<T> BufferAccess for CpuBufferPoolChunk<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> BufferInner<'_> {
|
||||
BufferInner {
|
||||
buffer: &self.buffer.inner,
|
||||
@ -702,28 +695,24 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> DeviceSize {
|
||||
self.requested_len * size_of::<T>() as DeviceSize
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> BufferAccessObject for Arc<CpuBufferPoolChunk<T, A>>
|
||||
impl<T> BufferAccessObject for Arc<CpuBufferPoolChunk<T>>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool + 'static,
|
||||
{
|
||||
#[inline]
|
||||
fn as_buffer_access_object(&self) -> Arc<dyn BufferAccess> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Drop for CpuBufferPoolChunk<T, A>
|
||||
impl<T> Drop for CpuBufferPoolChunk<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
// If `requested_len` is 0, then no entry was added in the chunks.
|
||||
@ -745,147 +734,125 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> TypedBufferAccess for CpuBufferPoolChunk<T, A>
|
||||
unsafe impl<T> TypedBufferAccess for CpuBufferPoolChunk<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
type Content = [T];
|
||||
}
|
||||
|
||||
unsafe impl<T, A> DeviceOwned for CpuBufferPoolChunk<T, A>
|
||||
unsafe impl<T> DeviceOwned for CpuBufferPoolChunk<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.buffer.inner.device()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> PartialEq for CpuBufferPoolChunk<T, A>
|
||||
impl<T> PartialEq for CpuBufferPoolChunk<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner() && self.size() == other.size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Eq for CpuBufferPoolChunk<T, A>
|
||||
impl<T> Eq for CpuBufferPoolChunk<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, A> Hash for CpuBufferPoolChunk<T, A>
|
||||
impl<T> Hash for CpuBufferPoolChunk<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
self.size().hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Clone for CpuBufferPoolSubbuffer<T, A>
|
||||
impl<T> Clone for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
fn clone(&self) -> CpuBufferPoolSubbuffer<T, A> {
|
||||
fn clone(&self) -> CpuBufferPoolSubbuffer<T> {
|
||||
CpuBufferPoolSubbuffer {
|
||||
chunk: self.chunk.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> BufferAccess for CpuBufferPoolSubbuffer<T, A>
|
||||
unsafe impl<T> BufferAccess for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn inner(&self) -> BufferInner<'_> {
|
||||
self.chunk.inner()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn size(&self) -> DeviceSize {
|
||||
self.chunk.size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> BufferAccessObject for Arc<CpuBufferPoolSubbuffer<T, A>>
|
||||
impl<T> BufferAccessObject for Arc<CpuBufferPoolSubbuffer<T>>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool + 'static,
|
||||
{
|
||||
#[inline]
|
||||
fn as_buffer_access_object(&self) -> Arc<dyn BufferAccess> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> TypedBufferAccess for CpuBufferPoolSubbuffer<T, A>
|
||||
unsafe impl<T> TypedBufferAccess for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
T: BufferContents,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
type Content = T;
|
||||
}
|
||||
|
||||
unsafe impl<T, A> DeviceOwned for CpuBufferPoolSubbuffer<T, A>
|
||||
unsafe impl<T> DeviceOwned for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.chunk.buffer.inner.device()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> PartialEq for CpuBufferPoolSubbuffer<T, A>
|
||||
impl<T> PartialEq for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner() && self.size() == other.size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Eq for CpuBufferPoolSubbuffer<T, A>
|
||||
impl<T> Eq for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, A> Hash for CpuBufferPoolSubbuffer<T, A>
|
||||
impl<T> Hash for CpuBufferPoolSubbuffer<T>
|
||||
where
|
||||
T: Send + Sync,
|
||||
[T]: BufferContents,
|
||||
A: MemoryPool,
|
||||
{
|
||||
#[inline]
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
self.size().hash(state);
|
||||
@ -894,20 +861,22 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::buffer::CpuBufferPool;
|
||||
use super::*;
|
||||
use std::mem;
|
||||
|
||||
#[test]
|
||||
fn basic_create() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let _ = CpuBufferPool::<u8>::upload(device);
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
|
||||
let _ = CpuBufferPool::<u8>::upload(memory_allocator);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reserve() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
|
||||
|
||||
let pool = CpuBufferPool::<u8>::upload(device);
|
||||
let pool = CpuBufferPool::<u8>::upload(memory_allocator);
|
||||
assert_eq!(pool.capacity(), 0);
|
||||
|
||||
pool.reserve(83).unwrap();
|
||||
@ -917,8 +886,9 @@ mod tests {
|
||||
#[test]
|
||||
fn capacity_increase() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
|
||||
|
||||
let pool = CpuBufferPool::upload(device);
|
||||
let pool = CpuBufferPool::upload(memory_allocator);
|
||||
assert_eq!(pool.capacity(), 0);
|
||||
|
||||
pool.from_data(12).unwrap();
|
||||
@ -935,8 +905,9 @@ mod tests {
|
||||
#[test]
|
||||
fn reuse_subbuffers() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
|
||||
|
||||
let pool = CpuBufferPool::upload(device);
|
||||
let pool = CpuBufferPool::upload(memory_allocator);
|
||||
assert_eq!(pool.capacity(), 0);
|
||||
|
||||
let mut capacity = None;
|
||||
@ -955,8 +926,9 @@ mod tests {
|
||||
#[test]
|
||||
fn chunk_loopback() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
|
||||
|
||||
let pool = CpuBufferPool::<u8>::upload(device);
|
||||
let pool = CpuBufferPool::<u8>::upload(memory_allocator);
|
||||
pool.reserve(5).unwrap();
|
||||
|
||||
let a = pool.from_iter(vec![0, 0]).unwrap();
|
||||
@ -973,8 +945,9 @@ mod tests {
|
||||
#[test]
|
||||
fn chunk_0_elems_doesnt_pollute() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
|
||||
|
||||
let pool = CpuBufferPool::<u8>::upload(device);
|
||||
let pool = CpuBufferPool::<u8>::upload(memory_allocator);
|
||||
|
||||
let _ = pool.from_iter(vec![]).unwrap();
|
||||
let _ = pool.from_iter(vec![0, 0]).unwrap();
|
||||
|
@ -16,31 +16,26 @@
|
||||
|
||||
use super::{
|
||||
sys::{UnsafeBuffer, UnsafeBufferCreateInfo},
|
||||
BufferAccess, BufferAccessObject, BufferContents, BufferCreationError, BufferInner,
|
||||
BufferUsage, CpuAccessibleBuffer, TypedBufferAccess,
|
||||
BufferAccess, BufferAccessObject, BufferContents, BufferInner, BufferUsage,
|
||||
CpuAccessibleBuffer, TypedBufferAccess,
|
||||
};
|
||||
use crate::{
|
||||
command_buffer::{
|
||||
allocator::CommandBufferAllocator, AutoCommandBufferBuilder, CommandBufferBeginError,
|
||||
CopyBufferInfo,
|
||||
},
|
||||
buffer::{BufferCreationError, ExternalBufferInfo},
|
||||
command_buffer::{allocator::CommandBufferAllocator, AutoCommandBufferBuilder, CopyBufferInfo},
|
||||
device::{Device, DeviceOwned},
|
||||
memory::{
|
||||
pool::{
|
||||
alloc_dedicated_with_exportable_fd, AllocFromRequirementsFilter, AllocLayout,
|
||||
MappingRequirement, MemoryPoolAlloc, PotentialDedicatedAllocation,
|
||||
StandardMemoryPoolAlloc,
|
||||
allocator::{
|
||||
AllocationCreateInfo, AllocationCreationError, AllocationType, MemoryAlloc,
|
||||
MemoryAllocatePreference, MemoryAllocator, MemoryUsage,
|
||||
},
|
||||
DedicatedAllocation, DeviceMemoryError, ExternalMemoryHandleType, MemoryPool,
|
||||
MemoryRequirements,
|
||||
DedicatedAllocation, DeviceMemoryError, ExternalMemoryHandleType,
|
||||
ExternalMemoryHandleTypes,
|
||||
},
|
||||
sync::Sharing,
|
||||
DeviceSize,
|
||||
};
|
||||
use smallvec::SmallVec;
|
||||
use std::{
|
||||
error::Error,
|
||||
fmt::{Display, Error as FmtError, Formatter},
|
||||
fs::File,
|
||||
hash::{Hash, Hasher},
|
||||
marker::PhantomData,
|
||||
@ -79,6 +74,7 @@ use std::{
|
||||
/// use vulkano::sync::GpuFuture;
|
||||
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
|
||||
/// # let queue: std::sync::Arc<vulkano::device::Queue> = return;
|
||||
/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
|
||||
/// # let command_buffer_allocator: vulkano::command_buffer::allocator::StandardCommandBufferAllocator = return;
|
||||
///
|
||||
/// // Simple iterator to construct test data.
|
||||
@ -86,7 +82,7 @@ use std::{
|
||||
///
|
||||
/// // Create a CPU accessible buffer initialized with the data.
|
||||
/// let temporary_accessible_buffer = CpuAccessibleBuffer::from_iter(
|
||||
/// device.clone(),
|
||||
/// &memory_allocator,
|
||||
/// BufferUsage { transfer_src: true, ..BufferUsage::empty() }, // Specify this buffer will be used as a transfer source.
|
||||
/// false,
|
||||
/// data,
|
||||
@ -95,7 +91,7 @@ use std::{
|
||||
///
|
||||
/// // Create a buffer array on the GPU with enough space for `10_000` floats.
|
||||
/// let device_local_buffer = DeviceLocalBuffer::<[f32]>::array(
|
||||
/// device.clone(),
|
||||
/// &memory_allocator,
|
||||
/// 10_000 as vulkano::DeviceSize,
|
||||
/// BufferUsage {
|
||||
/// storage_buffer: true,
|
||||
@ -129,7 +125,7 @@ use std::{
|
||||
/// .unwrap()
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct DeviceLocalBuffer<T, A = PotentialDedicatedAllocation<StandardMemoryPoolAlloc>>
|
||||
pub struct DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
{
|
||||
@ -137,7 +133,7 @@ where
|
||||
inner: Arc<UnsafeBuffer>,
|
||||
|
||||
// The memory held by the buffer.
|
||||
memory: A,
|
||||
memory: MemoryAlloc,
|
||||
|
||||
// Queue families allowed to access this buffer.
|
||||
queue_family_indices: SmallVec<[u32; 4]>,
|
||||
@ -156,13 +152,13 @@ where
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
pub fn new(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
usage: BufferUsage,
|
||||
queue_family_indices: impl IntoIterator<Item = u32>,
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, DeviceMemoryError> {
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, AllocationCreationError> {
|
||||
unsafe {
|
||||
DeviceLocalBuffer::raw(
|
||||
device,
|
||||
allocator,
|
||||
size_of::<T>() as DeviceSize,
|
||||
usage,
|
||||
queue_family_indices,
|
||||
@ -183,16 +179,12 @@ where
|
||||
///
|
||||
/// `command_buffer_builder` can then be used to record other commands, built, and executed as
|
||||
/// normal. If it is not executed, the buffer contents will be left undefined.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub fn from_buffer<B, L, A>(
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
source: Arc<B>,
|
||||
usage: BufferUsage,
|
||||
command_buffer_builder: &mut AutoCommandBufferBuilder<L, A>,
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, DeviceLocalBufferCreationError>
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, AllocationCreationError>
|
||||
where
|
||||
B: TypedBufferAccess<Content = T> + 'static,
|
||||
A: CommandBufferAllocator,
|
||||
@ -205,7 +197,7 @@ where
|
||||
};
|
||||
|
||||
let buffer = DeviceLocalBuffer::raw(
|
||||
source.device().clone(),
|
||||
allocator,
|
||||
source.size(),
|
||||
actual_usage,
|
||||
source
|
||||
@ -237,18 +229,17 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub fn from_data<L, A>(
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
data: T,
|
||||
usage: BufferUsage,
|
||||
command_buffer_builder: &mut AutoCommandBufferBuilder<L, A>,
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, DeviceLocalBufferCreationError>
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, AllocationCreationError>
|
||||
where
|
||||
A: CommandBufferAllocator,
|
||||
{
|
||||
let source = CpuAccessibleBuffer::from_data(
|
||||
command_buffer_builder.device().clone(),
|
||||
allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
@ -256,7 +247,7 @@ where
|
||||
false,
|
||||
data,
|
||||
)?;
|
||||
DeviceLocalBuffer::from_buffer(source, usage, command_buffer_builder)
|
||||
DeviceLocalBuffer::from_buffer(allocator, source, usage, command_buffer_builder)
|
||||
}
|
||||
}
|
||||
|
||||
@ -274,20 +265,19 @@ where
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `data` is empty.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub fn from_iter<D, L, A>(
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
data: D,
|
||||
usage: BufferUsage,
|
||||
command_buffer_builder: &mut AutoCommandBufferBuilder<L, A>,
|
||||
) -> Result<Arc<DeviceLocalBuffer<[T]>>, DeviceLocalBufferCreationError>
|
||||
) -> Result<Arc<DeviceLocalBuffer<[T]>>, AllocationCreationError>
|
||||
where
|
||||
D: IntoIterator<Item = T>,
|
||||
D::IntoIter: ExactSizeIterator,
|
||||
A: CommandBufferAllocator,
|
||||
{
|
||||
let source = CpuAccessibleBuffer::from_iter(
|
||||
command_buffer_builder.device().clone(),
|
||||
allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
@ -295,7 +285,7 @@ where
|
||||
false,
|
||||
data,
|
||||
)?;
|
||||
DeviceLocalBuffer::from_buffer(source, usage, command_buffer_builder)
|
||||
DeviceLocalBuffer::from_buffer(allocator, source, usage, command_buffer_builder)
|
||||
}
|
||||
}
|
||||
|
||||
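from_data and from_iter now take the allocator first and, as the hunk shows, still stage through a transfer_src CpuAccessibleBuffer plus a copy recorded into the builder. A caller-side sketch, assuming the `memory_allocator` from earlier and an AutoCommandBufferBuilder named `builder` that is currently recording.

use vulkano::buffer::{BufferUsage, DeviceLocalBuffer};

// The data lands in a staging buffer now; the GPU copy into device-local
// memory only runs when the command buffer built from `builder` is executed.
let device_local = DeviceLocalBuffer::from_iter(
    &memory_allocator,
    0..10_000u32,
    BufferUsage {
        storage_buffer: true,
        ..BufferUsage::empty()
    },
    &mut builder,
)
.unwrap();
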
@ -309,17 +299,15 @@ where
|
||||
///
|
||||
/// - Panics if `T` has zero size.
|
||||
/// - Panics if `len` is zero.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub fn array(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
len: DeviceSize,
|
||||
usage: BufferUsage,
|
||||
queue_family_indices: impl IntoIterator<Item = u32>,
|
||||
) -> Result<Arc<DeviceLocalBuffer<[T]>>, DeviceMemoryError> {
|
||||
) -> Result<Arc<DeviceLocalBuffer<[T]>>, AllocationCreationError> {
|
||||
unsafe {
|
||||
DeviceLocalBuffer::raw(
|
||||
device,
|
||||
allocator,
|
||||
len * size_of::<T>() as DeviceSize,
|
||||
usage,
|
||||
queue_family_indices,
|
||||
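array() follows the same allocator-first pattern for an uninitialized device-local slice. A sketch; the queue-family argument is an assumption (pass whichever families will actually touch the buffer).

// Space for 10_000 f32s in device-local memory; contents start out undefined.
let scratch = DeviceLocalBuffer::<[f32]>::array(
    &memory_allocator,
    10_000,
    BufferUsage {
        storage_buffer: true,
        ..BufferUsage::empty()
    },
    [queue.queue_family_index()], // assumed single queue family
)
.unwrap();
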
@ -341,41 +329,57 @@ where
     /// # Panics
     ///
     /// - Panics if `size` is zero.
     /// - Panics if `usage.shader_device_address` is `true`.
     // TODO: ^
     pub unsafe fn raw(
-        device: Arc<Device>,
+        allocator: &(impl MemoryAllocator + ?Sized),
         size: DeviceSize,
         usage: BufferUsage,
         queue_family_indices: impl IntoIterator<Item = u32>,
-    ) -> Result<Arc<DeviceLocalBuffer<T>>, DeviceMemoryError> {
+    ) -> Result<Arc<DeviceLocalBuffer<T>>, AllocationCreationError> {
         let queue_family_indices: SmallVec<[_; 4]> = queue_family_indices.into_iter().collect();

-        let (buffer, mem_reqs) = Self::build_buffer(&device, size, usage, &queue_family_indices)?;
-
-        let memory = MemoryPool::alloc_from_requirements(
-            &device.standard_memory_pool(),
-            &mem_reqs,
-            AllocLayout::Linear,
-            MappingRequirement::DoNotMap,
-            Some(DedicatedAllocation::Buffer(&buffer)),
-            |t| {
-                if t.property_flags.device_local {
-                    AllocFromRequirementsFilter::Preferred
+        let buffer = UnsafeBuffer::new(
+            allocator.device().clone(),
+            UnsafeBufferCreateInfo {
+                sharing: if queue_family_indices.len() >= 2 {
+                    Sharing::Concurrent(queue_family_indices.clone())
                 } else {
-                    AllocFromRequirementsFilter::Allowed
-                }
+                    Sharing::Exclusive
                 },
+                size,
+                usage,
+                ..Default::default()
+            },
-        )?;
-        debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
-        buffer.bind_memory(memory.memory(), memory.offset())?;
+        )
+        .map_err(|err| match err {
+            BufferCreationError::AllocError(err) => err,
+            // We don't use sparse-binding, therefore the other errors can't happen.
+            _ => unreachable!(),
+        })?;
+        let requirements = buffer.memory_requirements();
+        let create_info = AllocationCreateInfo {
+            requirements,
+            allocation_type: AllocationType::Linear,
+            usage: MemoryUsage::GpuOnly,
+            allocate_preference: MemoryAllocatePreference::Unknown,
+            dedicated_allocation: Some(DedicatedAllocation::Buffer(&buffer)),
+            ..Default::default()
+        };

-        Ok(Arc::new(DeviceLocalBuffer {
-            inner: buffer,
-            memory,
-            queue_family_indices,
-            marker: PhantomData,
-        }))
+        match allocator.allocate_unchecked(create_info) {
+            Ok(alloc) => {
+                debug_assert!(alloc.offset() % requirements.alignment == 0);
+                debug_assert!(alloc.size() == requirements.size);
+                buffer.bind_memory(alloc.device_memory(), alloc.offset())?;

+                Ok(Arc::new(DeviceLocalBuffer {
+                    inner: buffer,
+                    memory: alloc,
+                    queue_family_indices,
+                    marker: PhantomData,
+                }))
+            }
+            Err(err) => Err(err),
+        }
     }

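For readers building their own buffer types on top of the new allocator, the hunk above boils down to the following standalone sketch. It is not part of the diff; `allocator` is anything implementing `MemoryAllocator`, and `buffer` stands for a freshly created `UnsafeBuffer` whose memory has not been bound yet. Error handling is collapsed to `expect` for brevity.

```rust
use vulkano::memory::{
    allocator::{
        AllocationCreateInfo, AllocationType, MemoryAllocatePreference, MemoryUsage,
    },
    DedicatedAllocation,
};

// Describe the allocation from the buffer's own requirements.
let requirements = buffer.memory_requirements();
let create_info = AllocationCreateInfo {
    requirements,
    allocation_type: AllocationType::Linear,
    usage: MemoryUsage::GpuOnly,
    allocate_preference: MemoryAllocatePreference::Unknown,
    dedicated_allocation: Some(DedicatedAllocation::Buffer(&buffer)),
    ..Default::default()
};

// `allocate_unchecked` and `bind_memory` skip validation, hence the `unsafe` blocks.
let alloc = unsafe { allocator.allocate_unchecked(create_info) }.expect("allocation failed");
unsafe { buffer.bind_memory(alloc.device_memory(), alloc.offset()) }.expect("binding failed");
```
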
/// Same as `raw` but with exportable fd option for the allocated memory on Linux/BSD
|
||||
@ -383,76 +387,86 @@ where
|
||||
/// # Panics
|
||||
///
|
||||
/// - Panics if `size` is zero.
|
||||
/// - Panics if `usage.shader_device_address` is `true`.
|
||||
// TODO: ^
|
||||
pub unsafe fn raw_with_exportable_fd(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
size: DeviceSize,
|
||||
usage: BufferUsage,
|
||||
queue_family_indices: impl IntoIterator<Item = u32>,
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, DeviceMemoryError> {
|
||||
assert!(device.enabled_extensions().khr_external_memory_fd);
|
||||
assert!(device.enabled_extensions().khr_external_memory);
|
||||
) -> Result<Arc<DeviceLocalBuffer<T>>, AllocationCreationError> {
|
||||
let enabled_extensions = allocator.device().enabled_extensions();
|
||||
assert!(enabled_extensions.khr_external_memory_fd);
|
||||
assert!(enabled_extensions.khr_external_memory);
|
||||
|
||||
let queue_family_indices: SmallVec<[_; 4]> = queue_family_indices.into_iter().collect();
|
||||
|
||||
let (buffer, mem_reqs) = Self::build_buffer(&device, size, usage, &queue_family_indices)?;
|
||||
let external_memory_properties = allocator
|
||||
.device()
|
||||
.physical_device()
|
||||
.external_buffer_properties(ExternalBufferInfo {
|
||||
usage,
|
||||
..ExternalBufferInfo::handle_type(ExternalMemoryHandleType::OpaqueFd)
|
||||
})
|
||||
.unwrap()
|
||||
.external_memory_properties;
|
||||
// VUID-VkExportMemoryAllocateInfo-handleTypes-00656
|
||||
assert!(external_memory_properties.exportable);
|
||||
|
||||
let memory = alloc_dedicated_with_exportable_fd(
|
||||
device,
|
||||
&mem_reqs,
|
||||
AllocLayout::Linear,
|
||||
MappingRequirement::DoNotMap,
|
||||
DedicatedAllocation::Buffer(&buffer),
|
||||
|t| {
|
||||
if t.property_flags.device_local {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
},
|
||||
)?;
|
||||
let mem_offset = memory.offset();
|
||||
debug_assert!((mem_offset % mem_reqs.alignment) == 0);
|
||||
buffer.bind_memory(memory.memory(), mem_offset)?;
|
||||
// VUID-VkMemoryAllocateInfo-pNext-00639
|
||||
// Guaranteed because we always create a dedicated allocation
|
||||
|
||||
Ok(Arc::new(DeviceLocalBuffer {
|
||||
inner: buffer,
|
||||
memory,
|
||||
queue_family_indices,
|
||||
marker: PhantomData,
|
||||
}))
|
||||
}
|
||||
|
||||
unsafe fn build_buffer(
|
||||
device: &Arc<Device>,
|
||||
size: DeviceSize,
|
||||
usage: BufferUsage,
|
||||
queue_family_indices: &SmallVec<[u32; 4]>,
|
||||
) -> Result<(Arc<UnsafeBuffer>, MemoryRequirements), DeviceMemoryError> {
|
||||
let buffer = {
|
||||
match UnsafeBuffer::new(
|
||||
device.clone(),
|
||||
UnsafeBufferCreateInfo {
|
||||
sharing: if queue_family_indices.len() >= 2 {
|
||||
Sharing::Concurrent(queue_family_indices.clone())
|
||||
} else {
|
||||
Sharing::Exclusive
|
||||
},
|
||||
size,
|
||||
usage,
|
||||
..Default::default()
|
||||
},
|
||||
) {
|
||||
Ok(b) => b,
|
||||
Err(BufferCreationError::AllocError(err)) => return Err(err),
|
||||
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
|
||||
// errors can't happen
|
||||
}
|
||||
let external_memory_handle_types = ExternalMemoryHandleTypes {
|
||||
opaque_fd: true,
|
||||
..ExternalMemoryHandleTypes::empty()
|
||||
};
|
||||
let mem_reqs = buffer.memory_requirements();
|
||||
let buffer = UnsafeBuffer::new(
|
||||
allocator.device().clone(),
|
||||
UnsafeBufferCreateInfo {
|
||||
sharing: if queue_family_indices.len() >= 2 {
|
||||
Sharing::Concurrent(queue_family_indices.clone())
|
||||
} else {
|
||||
Sharing::Exclusive
|
||||
},
|
||||
size,
|
||||
usage,
|
||||
external_memory_handle_types,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.map_err(|err| match err {
|
||||
BufferCreationError::AllocError(err) => err,
|
||||
// We don't use sparse-binding, therefore the other errors can't happen.
|
||||
_ => unreachable!(),
|
||||
})?;
|
||||
let requirements = buffer.memory_requirements();
|
||||
let memory_type_index = allocator
|
||||
.find_memory_type_index(requirements.memory_type_bits, MemoryUsage::GpuOnly.into())
|
||||
.expect("failed to find a suitable memory type");
|
||||
|
||||
Ok((buffer, mem_reqs))
|
||||
let memory_properties = allocator.device().physical_device().memory_properties();
|
||||
let heap_index = memory_properties.memory_types[memory_type_index as usize].heap_index;
|
||||
// VUID-vkAllocateMemory-pAllocateInfo-01713
|
||||
assert!(size <= memory_properties.memory_heaps[heap_index as usize].size);
|
||||
|
||||
match allocator.allocate_dedicated_unchecked(
|
||||
memory_type_index,
|
||||
requirements.size,
|
||||
Some(DedicatedAllocation::Buffer(&buffer)),
|
||||
external_memory_handle_types,
|
||||
) {
|
||||
Ok(alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
buffer.bind_memory(alloc.device_memory(), alloc.offset())?;
|
||||
|
||||
Ok(Arc::new(DeviceLocalBuffer {
|
||||
inner: buffer,
|
||||
memory: alloc,
|
||||
queue_family_indices,
|
||||
marker: PhantomData,
|
||||
}))
|
||||
}
|
||||
Err(err) => Err(err),
|
||||
}
|
||||
}
|
||||
|
||||
     /// Exports posix file descriptor for the allocated memory
@ -460,12 +474,12 @@ where
     /// Only works on Linux/BSD.
     pub fn export_posix_fd(&self) -> Result<File, DeviceMemoryError> {
         self.memory
-            .memory()
+            .device_memory()
             .export_fd(ExternalMemoryHandleType::OpaqueFd)
     }
 }

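A hedged sketch of the export path these two functions enable: the device must have `khr_external_memory` and `khr_external_memory_fd` enabled, and this only works on Linux/BSD. Everything except the two constructors shown in the diff (`memory_allocator`, `queue`, the sizes) is assumed to exist in the caller.

```rust
// Allocate 4 bytes of device-local memory whose fd can be exported.
let buffer = unsafe {
    DeviceLocalBuffer::<u32>::raw_with_exportable_fd(
        &memory_allocator,
        4, // size in bytes
        BufferUsage {
            transfer_dst: true,
            ..BufferUsage::empty()
        },
        [queue.queue_family_index()],
    )
}
.unwrap();

// Hand the opaque fd to another API (e.g. CUDA) on Linux/BSD.
let fd: std::fs::File = buffer.export_posix_fd().unwrap();
```
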
impl<T, A> DeviceLocalBuffer<T, A>
|
||||
impl<T> DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
{
|
||||
@ -475,7 +489,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> DeviceOwned for DeviceLocalBuffer<T, A>
|
||||
unsafe impl<T> DeviceOwned for DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
{
|
||||
@ -484,10 +498,9 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> BufferAccess for DeviceLocalBuffer<T, A>
|
||||
unsafe impl<T> BufferAccess for DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
fn inner(&self) -> BufferInner<'_> {
|
||||
BufferInner {
|
||||
@ -501,45 +514,36 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> BufferAccessObject for Arc<DeviceLocalBuffer<T, A>>
|
||||
impl<T> BufferAccessObject for Arc<DeviceLocalBuffer<T>>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync + 'static,
|
||||
{
|
||||
fn as_buffer_access_object(&self) -> Arc<dyn BufferAccess> {
|
||||
self.clone()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, A> TypedBufferAccess for DeviceLocalBuffer<T, A>
|
||||
unsafe impl<T> TypedBufferAccess for DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
type Content = T;
|
||||
}
|
||||
|
||||
impl<T, A> PartialEq for DeviceLocalBuffer<T, A>
|
||||
impl<T> PartialEq for DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner() && self.size() == other.size()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, A> Eq for DeviceLocalBuffer<T, A>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
}
|
||||
impl<T> Eq for DeviceLocalBuffer<T> where T: BufferContents + ?Sized {}
|
||||
|
||||
impl<T, A> Hash for DeviceLocalBuffer<T, A>
|
||||
impl<T> Hash for DeviceLocalBuffer<T>
|
||||
where
|
||||
T: BufferContents + ?Sized,
|
||||
A: Send + Sync,
|
||||
{
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
@ -547,42 +551,6 @@ where
     }
 }

-#[derive(Clone, Debug)]
-pub enum DeviceLocalBufferCreationError {
-    DeviceMemoryAllocationError(DeviceMemoryError),
-    CommandBufferBeginError(CommandBufferBeginError),
-}
-
-impl Error for DeviceLocalBufferCreationError {
-    fn source(&self) -> Option<&(dyn Error + 'static)> {
-        match self {
-            Self::DeviceMemoryAllocationError(err) => Some(err),
-            Self::CommandBufferBeginError(err) => Some(err),
-        }
-    }
-}
-
-impl Display for DeviceLocalBufferCreationError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
-        match self {
-            Self::DeviceMemoryAllocationError(err) => err.fmt(f),
-            Self::CommandBufferBeginError(err) => err.fmt(f),
-        }
-    }
-}
-
-impl From<DeviceMemoryError> for DeviceLocalBufferCreationError {
-    fn from(e: DeviceMemoryError) -> Self {
-        Self::DeviceMemoryAllocationError(e)
-    }
-}
-
-impl From<CommandBufferBeginError> for DeviceLocalBufferCreationError {
-    fn from(e: CommandBufferBeginError) -> Self {
-        Self::CommandBufferBeginError(e)
-    }
-}

#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@ -591,6 +559,7 @@ mod tests {
|
||||
allocator::StandardCommandBufferAllocator, CommandBufferUsage,
|
||||
PrimaryCommandBufferAbstract,
|
||||
},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
sync::GpuFuture,
|
||||
};
|
||||
|
||||
@ -605,8 +574,10 @@ mod tests {
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
.unwrap();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let buffer = DeviceLocalBuffer::from_data(
|
||||
&memory_allocator,
|
||||
12u32,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
@ -617,7 +588,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let destination = CpuAccessibleBuffer::from_data(
|
||||
device,
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
@ -653,8 +624,10 @@ mod tests {
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
.unwrap();
|
||||
let allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let buffer = DeviceLocalBuffer::from_iter(
|
||||
&allocator,
|
||||
(0..512u32).map(|n| n * 2),
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
@ -665,7 +638,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let destination = CpuAccessibleBuffer::from_iter(
|
||||
device,
|
||||
&allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
@ -697,16 +670,18 @@ mod tests {
|
||||
fn create_buffer_zero_size_data() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device);
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
let mut command_buffer_builder = AutoCommandBufferBuilder::primary(
|
||||
&command_buffer_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
.unwrap();
|
||||
let allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
assert_should_panic!({
|
||||
DeviceLocalBuffer::from_data(
|
||||
&allocator,
|
||||
(),
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
|
@ -31,7 +31,10 @@ use super::{
|
||||
use crate::{
|
||||
device::{Device, DeviceOwned},
|
||||
macros::vulkan_bitflags,
|
||||
memory::{DeviceMemory, DeviceMemoryError, ExternalMemoryHandleTypes, MemoryRequirements},
|
||||
memory::{
|
||||
allocator::AllocationCreationError, DeviceMemory, ExternalMemoryHandleTypes,
|
||||
MemoryRequirements,
|
||||
},
|
||||
range_map::RangeMap,
|
||||
sync::{AccessError, CurrentAccess, Sharing},
|
||||
DeviceSize, OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
|
||||
@ -584,7 +587,7 @@ impl Default for UnsafeBufferCreateInfo {
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub enum BufferCreationError {
     /// Allocating memory failed.
-    AllocError(DeviceMemoryError),
+    AllocError(AllocationCreationError),

     RequirementNotMet {
         required_for: &'static str,
@ -645,11 +648,11 @@ impl From<OomError> for BufferCreationError {
 impl From<VulkanError> for BufferCreationError {
     fn from(err: VulkanError) -> BufferCreationError {
         match err {
-            err @ VulkanError::OutOfHostMemory => {
-                BufferCreationError::AllocError(DeviceMemoryError::from(err))
+            VulkanError::OutOfHostMemory => {
+                BufferCreationError::AllocError(AllocationCreationError::OutOfHostMemory)
             }
-            err @ VulkanError::OutOfDeviceMemory => {
-                BufferCreationError::AllocError(DeviceMemoryError::from(err))
+            VulkanError::OutOfDeviceMemory => {
+                BufferCreationError::AllocError(AllocationCreationError::OutOfDeviceMemory)
             }
             _ => panic!("unexpected error: {:?}", err),
         }
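A small sketch (not from the commit) of what caller-side error handling looks like once `AllocError` carries an `AllocationCreationError`. The `create_buffer` helper and the import paths are assumptions for illustration; only the variant names come from the hunk above.

```rust
use vulkano::{buffer::sys::BufferCreationError, memory::allocator::AllocationCreationError};

match create_buffer() {
    Ok(_buffer) => { /* use the buffer */ }
    Err(BufferCreationError::AllocError(AllocationCreationError::OutOfDeviceMemory)) => {
        // Free some resources or fall back to a smaller allocation.
    }
    Err(err) => panic!("unexpected error: {:?}", err),
}
```
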
@ -24,26 +24,28 @@
 //! use vulkano::buffer::view::{BufferView, BufferViewCreateInfo};
 //! use vulkano::format::Format;
 //!
 //! # let device: Arc<vulkano::device::Device> = return;
 //! # let queue: Arc<vulkano::device::Queue> = return;
+//! # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
 //! let usage = BufferUsage {
 //!     storage_texel_buffer: true,
 //!     ..BufferUsage::empty()
 //! };
 //!
 //! let buffer = DeviceLocalBuffer::<[u32]>::array(
-//!     device.clone(),
+//!     &memory_allocator,
 //!     128,
 //!     usage,
 //!     [queue.queue_family_index()],
-//! ).unwrap();
+//! )
+//! .unwrap();
 //! let _view = BufferView::new(
 //!     buffer,
 //!     BufferViewCreateInfo {
 //!         format: Some(Format::R32_UINT),
 //!         ..Default::default()
 //!     },
-//! ).unwrap();
+//! )
+//! .unwrap();
 //! ```

use super::{BufferAccess, BufferAccessObject, BufferInner};
|
||||
@ -477,27 +479,31 @@ impl Hash for dyn BufferViewAbstract {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{BufferView, BufferViewCreateInfo, BufferViewCreationError};
|
||||
use crate::{
|
||||
buffer::{
|
||||
view::{BufferView, BufferViewCreateInfo, BufferViewCreationError},
|
||||
BufferUsage, DeviceLocalBuffer,
|
||||
},
|
||||
buffer::{BufferUsage, DeviceLocalBuffer},
|
||||
format::Format,
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn create_uniform() {
|
||||
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let usage = BufferUsage {
|
||||
uniform_texel_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
};
|
||||
|
||||
let buffer =
|
||||
DeviceLocalBuffer::<[[u8; 4]]>::array(device, 128, usage, [queue.queue_family_index()])
|
||||
.unwrap();
|
||||
let buffer = DeviceLocalBuffer::<[[u8; 4]]>::array(
|
||||
&memory_allocator,
|
||||
128,
|
||||
usage,
|
||||
[queue.queue_family_index()],
|
||||
)
|
||||
.unwrap();
|
||||
BufferView::new(
|
||||
buffer,
|
||||
BufferViewCreateInfo {
|
||||
@ -512,15 +518,20 @@ mod tests {
|
||||
fn create_storage() {
|
||||
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let usage = BufferUsage {
|
||||
storage_texel_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
};
|
||||
|
||||
let buffer =
|
||||
DeviceLocalBuffer::<[[u8; 4]]>::array(device, 128, usage, [queue.queue_family_index()])
|
||||
.unwrap();
|
||||
let buffer = DeviceLocalBuffer::<[[u8; 4]]>::array(
|
||||
&memory_allocator,
|
||||
128,
|
||||
usage,
|
||||
[queue.queue_family_index()],
|
||||
)
|
||||
.unwrap();
|
||||
BufferView::new(
|
||||
buffer,
|
||||
BufferViewCreateInfo {
|
||||
@ -535,15 +546,20 @@ mod tests {
|
||||
fn create_storage_atomic() {
|
||||
// `VK_FORMAT_R32_UINT` guaranteed to be a supported format for atomics
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let usage = BufferUsage {
|
||||
storage_texel_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
};
|
||||
|
||||
let buffer =
|
||||
DeviceLocalBuffer::<[u32]>::array(device, 128, usage, [queue.queue_family_index()])
|
||||
.unwrap();
|
||||
let buffer = DeviceLocalBuffer::<[u32]>::array(
|
||||
&memory_allocator,
|
||||
128,
|
||||
usage,
|
||||
[queue.queue_family_index()],
|
||||
)
|
||||
.unwrap();
|
||||
BufferView::new(
|
||||
buffer,
|
||||
BufferViewCreateInfo {
|
||||
@ -558,9 +574,10 @@ mod tests {
|
||||
fn wrong_usage() {
|
||||
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let buffer = DeviceLocalBuffer::<[[u8; 4]]>::array(
|
||||
device,
|
||||
&memory_allocator,
|
||||
128,
|
||||
BufferUsage {
|
||||
transfer_dst: true, // Dummy value
|
||||
@ -585,6 +602,7 @@ mod tests {
|
||||
#[test]
|
||||
fn unsupported_format() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
|
||||
let usage = BufferUsage {
|
||||
uniform_texel_buffer: true,
|
||||
@ -593,7 +611,7 @@ mod tests {
|
||||
};
|
||||
|
||||
let buffer = DeviceLocalBuffer::<[[f64; 4]]>::array(
|
||||
device,
|
||||
&memory_allocator,
|
||||
128,
|
||||
usage,
|
||||
[queue.queue_family_index()],
|
||||
|
@ -727,6 +727,12 @@ pub struct PrimaryAutoCommandBuffer<A = StandardCommandBufferAlloc> {
|
||||
state: Mutex<CommandBufferState>,
|
||||
}
|
||||
|
||||
unsafe impl<A> DeviceOwned for PrimaryAutoCommandBuffer<A> {
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.inner.device()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> VulkanObject for PrimaryAutoCommandBuffer<A> {
|
||||
type Handle = ash::vk::CommandBuffer;
|
||||
|
||||
@ -735,12 +741,6 @@ unsafe impl<A> VulkanObject for PrimaryAutoCommandBuffer<A> {
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> DeviceOwned for PrimaryAutoCommandBuffer<A> {
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.inner.device()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> PrimaryCommandBufferAbstract for PrimaryAutoCommandBuffer<A>
|
||||
where
|
||||
A: CommandBufferAlloc,
|
||||
@ -918,6 +918,7 @@ mod tests {
|
||||
ExecuteCommandsError,
|
||||
},
|
||||
device::{DeviceCreateInfo, QueueCreateInfo},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
sync::GpuFuture,
|
||||
};
|
||||
|
||||
@ -943,9 +944,10 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let queue = queues.next().unwrap();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
|
||||
let source = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
@ -956,7 +958,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let destination = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
@ -966,9 +968,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let allocator = StandardCommandBufferAllocator::new(device);
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device);
|
||||
let mut cbb = AutoCommandBufferBuilder::primary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
@ -1004,11 +1006,11 @@ mod tests {
|
||||
fn secondary_nonconcurrent_conflict() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let allocator = StandardCommandBufferAllocator::new(device);
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device);
|
||||
|
||||
// Make a secondary CB that doesn't support simultaneous use.
|
||||
let builder = AutoCommandBufferBuilder::secondary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::MultipleSubmit,
|
||||
Default::default(),
|
||||
@ -1018,7 +1020,7 @@ mod tests {
|
||||
|
||||
{
|
||||
let mut builder = AutoCommandBufferBuilder::primary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::SimultaneousUse,
|
||||
)
|
||||
@ -1041,7 +1043,7 @@ mod tests {
|
||||
|
||||
{
|
||||
let mut builder = AutoCommandBufferBuilder::primary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::SimultaneousUse,
|
||||
)
|
||||
@ -1050,7 +1052,7 @@ mod tests {
|
||||
let cb1 = builder.build().unwrap();
|
||||
|
||||
let mut builder = AutoCommandBufferBuilder::primary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::SimultaneousUse,
|
||||
)
|
||||
@ -1078,8 +1080,9 @@ mod tests {
|
||||
fn buffer_self_copy_overlapping() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let source = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
transfer_dst: true,
|
||||
@ -1090,9 +1093,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let allocator = StandardCommandBufferAllocator::new(device);
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device);
|
||||
let mut builder = AutoCommandBufferBuilder::primary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
@ -1129,8 +1132,9 @@ mod tests {
|
||||
fn buffer_self_copy_not_overlapping() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let source = CpuAccessibleBuffer::from_iter(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
transfer_dst: true,
|
||||
@ -1141,9 +1145,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let allocator = StandardCommandBufferAllocator::new(device);
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device);
|
||||
let mut builder = AutoCommandBufferBuilder::primary(
|
||||
&allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
|
@ -377,6 +377,7 @@ mod tests {
|
||||
},
|
||||
PersistentDescriptorSet, WriteDescriptorSet,
|
||||
},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{layout::PipelineLayoutCreateInfo, PipelineBindPoint, PipelineLayout},
|
||||
sampler::{Sampler, SamplerCreateInfo},
|
||||
shader::ShaderStages,
|
||||
@ -412,27 +413,28 @@ mod tests {
|
||||
unsafe {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device);
|
||||
let mut command_buffer_builder = AutoCommandBufferBuilder::primary(
|
||||
&command_buffer_allocator,
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
let mut cbb = AutoCommandBufferBuilder::primary(
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
// Create a tiny test buffer
|
||||
let buffer = DeviceLocalBuffer::from_data(
|
||||
&memory_allocator,
|
||||
0u32,
|
||||
BufferUsage {
|
||||
transfer_dst: true,
|
||||
..BufferUsage::empty()
|
||||
},
|
||||
&mut command_buffer_builder,
|
||||
&mut cbb,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
command_buffer_builder
|
||||
.build()
|
||||
cbb.build()
|
||||
.unwrap()
|
||||
.execute(queue.clone())
|
||||
.unwrap()
|
||||
@ -445,7 +447,7 @@ mod tests {
|
||||
let secondary = (0..2)
|
||||
.map(|_| {
|
||||
let mut builder = AutoCommandBufferBuilder::secondary(
|
||||
&command_buffer_allocator,
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::SimultaneousUse,
|
||||
Default::default(),
|
||||
@ -461,7 +463,7 @@ mod tests {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let allocs = command_buffer_allocator
|
||||
let allocs = cb_allocator
|
||||
.allocate(queue.queue_family_index(), CommandBufferLevel::Primary, 2)
|
||||
.unwrap()
|
||||
.collect::<Vec<_>>();
|
||||
@ -520,9 +522,8 @@ mod tests {
|
||||
unsafe {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
|
||||
let builder_alloc = allocator
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
let builder_alloc = cb_allocator
|
||||
.allocate(queue.queue_family_index(), CommandBufferLevel::Primary, 1)
|
||||
.unwrap()
|
||||
.next()
|
||||
@ -535,8 +536,10 @@ mod tests {
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let buf = CpuAccessibleBuffer::from_data(
|
||||
device,
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
vertex_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -114,9 +114,8 @@ pub use crate::{
|
||||
fns::DeviceFunctions,
|
||||
};
|
||||
use crate::{
|
||||
instance::Instance,
|
||||
memory::{pool::StandardMemoryPool, ExternalMemoryHandleType},
|
||||
OomError, RequirementNotMet, RequiresOneOf, Version, VulkanError, VulkanObject,
|
||||
instance::Instance, memory::ExternalMemoryHandleType, OomError, RequirementNotMet,
|
||||
RequiresOneOf, Version, VulkanError, VulkanObject,
|
||||
};
|
||||
use ash::vk::Handle;
|
||||
use parking_lot::Mutex;
|
||||
@ -132,7 +131,7 @@ use std::{
|
||||
ptr,
|
||||
sync::{
|
||||
atomic::{AtomicU32, Ordering},
|
||||
Arc, Weak,
|
||||
Arc,
|
||||
},
|
||||
};
|
||||
|
||||
@ -153,7 +152,6 @@ pub struct Device {
|
||||
api_version: Version,
|
||||
|
||||
fns: DeviceFunctions,
|
||||
standard_memory_pool: Mutex<Weak<StandardMemoryPool>>,
|
||||
enabled_extensions: DeviceExtensions,
|
||||
enabled_features: Features,
|
||||
active_queue_family_indices: SmallVec<[u32; 2]>,
|
||||
@ -410,7 +408,6 @@ impl Device {
|
||||
physical_device,
|
||||
api_version,
|
||||
fns,
|
||||
standard_memory_pool: Mutex::new(Weak::new()),
|
||||
enabled_extensions,
|
||||
enabled_features,
|
||||
active_queue_family_indices,
|
||||
@ -491,21 +488,6 @@ impl Device {
         &self.enabled_features
     }

-    /// Returns the standard memory pool used by default if you don't provide any other pool.
-    pub fn standard_memory_pool(self: &Arc<Self>) -> Arc<StandardMemoryPool> {
-        let mut pool = self.standard_memory_pool.lock();
-
-        if let Some(p) = pool.upgrade() {
-            return p;
-        }
-
-        // The weak pointer is empty, so we create the pool.
-        let new_pool = StandardMemoryPool::new(self.clone());
-        *pool = Arc::downgrade(&new_pool);
-
-        new_pool
-    }
-
     /// Returns the current number of active [`DeviceMemory`] allocations the device has.
     ///
     /// [`DeviceMemory`]: crate::memory::DeviceMemory

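Since `Device::standard_memory_pool` is gone, callers now construct an allocator themselves, typically once right after device creation, and pass it to the buffer and image constructors. A minimal sketch, mirroring the examples updated elsewhere in this commit; `device` is assumed to be in scope.

```rust
use std::sync::Arc;
use vulkano::memory::allocator::StandardMemoryAllocator;

// Before: each constructor fetched device.standard_memory_pool() implicitly.
// After: create the allocator once and share it (wrap in `Arc` if several systems need it).
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
```
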
@ -14,15 +14,14 @@ use super::{
|
||||
use crate::{
|
||||
device::{Device, DeviceOwned},
|
||||
format::Format,
|
||||
image::{sys::UnsafeImageCreateInfo, ImageDimensions},
|
||||
image::{sys::UnsafeImageCreateInfo, ImageDimensions, ImageFormatInfo},
|
||||
memory::{
|
||||
pool::{
|
||||
alloc_dedicated_with_exportable_fd, AllocFromRequirementsFilter, AllocLayout,
|
||||
MappingRequirement, MemoryPoolAlloc, PotentialDedicatedAllocation,
|
||||
StandardMemoryPoolAlloc,
|
||||
allocator::{
|
||||
AllocationCreateInfo, AllocationType, MemoryAlloc, MemoryAllocatePreference,
|
||||
MemoryAllocator, MemoryUsage,
|
||||
},
|
||||
DedicatedAllocation, DeviceMemoryError, ExternalMemoryHandleType,
|
||||
ExternalMemoryHandleTypes, MemoryPool,
|
||||
ExternalMemoryHandleTypes,
|
||||
},
|
||||
DeviceSize,
|
||||
};
|
||||
@ -65,12 +64,12 @@ use std::{
|
||||
///
|
||||
// TODO: forbid reading transient images outside render passes?
|
||||
#[derive(Debug)]
|
||||
pub struct AttachmentImage<A = PotentialDedicatedAllocation<StandardMemoryPoolAlloc>> {
|
||||
pub struct AttachmentImage {
|
||||
// Inner implementation.
|
||||
image: Arc<UnsafeImage>,
|
||||
|
||||
// Memory used to back the image.
|
||||
memory: A,
|
||||
memory: MemoryAlloc,
|
||||
|
||||
// Layout to use when the image is used as a framebuffer attachment.
|
||||
// Must be either "depth-stencil optimal" or "color optimal".
|
||||
@ -88,12 +87,12 @@ impl AttachmentImage {
|
||||
/// format as a framebuffer attachment.
|
||||
#[inline]
|
||||
pub fn new(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
AttachmentImage::new_impl(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
@ -107,7 +106,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn input_attachment(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
@ -117,7 +116,7 @@ impl AttachmentImage {
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
@ -132,12 +131,19 @@ impl AttachmentImage {
|
||||
/// > want a regular image.
|
||||
#[inline]
|
||||
pub fn multisampled(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, ImageUsage::empty(), samples)
|
||||
AttachmentImage::new_impl(
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
ImageUsage::empty(),
|
||||
samples,
|
||||
)
|
||||
}
|
||||
|
||||
/// Same as `multisampled`, but creates an image that can be used as an input attachment.
|
||||
@ -145,7 +151,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn multisampled_input_attachment(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
@ -155,7 +161,7 @@ impl AttachmentImage {
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, base_usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, 1, format, base_usage, samples)
|
||||
}
|
||||
|
||||
/// Same as `new`, but lets you specify additional usages.
|
||||
@ -165,12 +171,19 @@ impl AttachmentImage {
|
||||
/// addition to these two.
|
||||
#[inline]
|
||||
pub fn with_usage(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
usage: ImageUsage,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, usage, SampleCount::Sample1)
|
||||
AttachmentImage::new_impl(
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
usage,
|
||||
SampleCount::Sample1,
|
||||
)
|
||||
}
|
||||
|
||||
/// Same as `with_usage`, but creates a multisampled image.
|
||||
@ -179,13 +192,13 @@ impl AttachmentImage {
|
||||
/// > want a regular image.
|
||||
#[inline]
|
||||
pub fn multisampled_with_usage(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
usage: ImageUsage,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, 1, format, usage, samples)
|
||||
}
|
||||
|
||||
/// Same as `multisampled_with_usage`, but creates an image with multiple layers.
|
||||
@ -194,14 +207,14 @@ impl AttachmentImage {
|
||||
/// > want a regular image.
|
||||
#[inline]
|
||||
pub fn multisampled_with_usage_with_layers(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
array_layers: u32,
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
usage: ImageUsage,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
AttachmentImage::new_impl(device, dimensions, array_layers, format, usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, array_layers, format, usage, samples)
|
||||
}
|
||||
|
||||
/// Same as `new`, except that the image can later be sampled.
|
||||
@ -209,7 +222,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
@ -219,7 +232,7 @@ impl AttachmentImage {
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
@ -233,7 +246,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled_input_attachment(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
@ -244,7 +257,7 @@ impl AttachmentImage {
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
@ -261,7 +274,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled_multisampled(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
@ -271,7 +284,7 @@ impl AttachmentImage {
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, base_usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, 1, format, base_usage, samples)
|
||||
}
|
||||
|
||||
/// Same as `sampled_multisampled`, but creates an image that can be used as an input
|
||||
@ -280,7 +293,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn sampled_multisampled_input_attachment(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
@ -291,7 +304,7 @@ impl AttachmentImage {
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, base_usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, 1, format, base_usage, samples)
|
||||
}
|
||||
|
||||
/// Same as `new`, except that the image will be transient.
|
||||
@ -302,7 +315,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn transient(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
@ -312,7 +325,7 @@ impl AttachmentImage {
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
@ -326,7 +339,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `with_usage`.
|
||||
#[inline]
|
||||
pub fn transient_input_attachment(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
format: Format,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
@ -337,7 +350,7 @@ impl AttachmentImage {
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
1,
|
||||
format,
|
||||
@ -354,7 +367,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn transient_multisampled(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
@ -364,7 +377,7 @@ impl AttachmentImage {
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, base_usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, 1, format, base_usage, samples)
|
||||
}
|
||||
|
||||
/// Same as `transient_multisampled`, but creates an image that can be used as an input
|
||||
@ -373,7 +386,7 @@ impl AttachmentImage {
|
||||
/// > **Note**: This function is just a convenient shortcut for `multisampled_with_usage`.
|
||||
#[inline]
|
||||
pub fn transient_multisampled_input_attachment(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
samples: SampleCount,
|
||||
format: Format,
|
||||
@ -384,19 +397,19 @@ impl AttachmentImage {
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
|
||||
AttachmentImage::new_impl(device, dimensions, 1, format, base_usage, samples)
|
||||
AttachmentImage::new_impl(allocator, dimensions, 1, format, base_usage, samples)
|
||||
}
|
||||
|
||||
// All constructors dispatch to this one.
|
||||
fn new_impl(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
array_layers: u32,
|
||||
format: Format,
|
||||
base_usage: ImageUsage,
|
||||
samples: SampleCount,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
let physical_device = device.physical_device();
|
||||
let physical_device = allocator.device().physical_device();
|
||||
let device_properties = physical_device.properties();
|
||||
|
||||
if dimensions[0] > device_properties.max_framebuffer_height {
|
||||
@ -417,7 +430,7 @@ impl AttachmentImage {
|
||||
}
|
||||
|
||||
let image = UnsafeImage::new(
|
||||
device.clone(),
|
||||
allocator.device().clone(),
|
||||
UnsafeImageCreateInfo {
|
||||
dimensions: ImageDimensions::Dim2d {
|
||||
width: dimensions[0],
|
||||
@ -434,48 +447,46 @@ impl AttachmentImage {
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
let requirements = image.memory_requirements();
|
||||
let create_info = AllocationCreateInfo {
|
||||
requirements,
|
||||
allocation_type: AllocationType::NonLinear,
|
||||
usage: MemoryUsage::GpuOnly,
|
||||
allocate_preference: MemoryAllocatePreference::Unknown,
|
||||
dedicated_allocation: Some(DedicatedAllocation::Image(&image)),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mem_reqs = image.memory_requirements();
|
||||
let memory = MemoryPool::alloc_from_requirements(
|
||||
&device.standard_memory_pool(),
|
||||
&mem_reqs,
|
||||
AllocLayout::Optimal,
|
||||
MappingRequirement::DoNotMap,
|
||||
Some(DedicatedAllocation::Image(&image)),
|
||||
|t| {
|
||||
if t.property_flags.device_local {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
},
|
||||
)?;
|
||||
debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe {
|
||||
image.bind_memory(memory.memory(), memory.offset())?;
|
||||
match unsafe { allocator.allocate_unchecked(create_info) } {
|
||||
Ok(alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
unsafe { image.bind_memory(alloc.device_memory(), alloc.offset()) }?;
|
||||
|
||||
Ok(Arc::new(AttachmentImage {
|
||||
image,
|
||||
memory: alloc,
|
||||
attachment_layout: if is_depth {
|
||||
ImageLayout::DepthStencilAttachmentOptimal
|
||||
} else {
|
||||
ImageLayout::ColorAttachmentOptimal
|
||||
},
|
||||
initialized: AtomicBool::new(false),
|
||||
}))
|
||||
}
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
|
||||
Ok(Arc::new(AttachmentImage {
|
||||
image,
|
||||
memory,
|
||||
attachment_layout: if is_depth {
|
||||
ImageLayout::DepthStencilAttachmentOptimal
|
||||
} else {
|
||||
ImageLayout::ColorAttachmentOptimal
|
||||
},
|
||||
initialized: AtomicBool::new(false),
|
||||
}))
|
||||
}
|
||||
|
||||
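Not part of the diff: how the attachment constructors read after the change, using the same allocator as the test module at the bottom of this file. The dimensions and formats are arbitrary; `device` is assumed to be in scope.

```rust
use vulkano::{
    format::Format, image::AttachmentImage, memory::allocator::StandardMemoryAllocator,
};

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());

// A color attachment and a depth attachment, both served by the same allocator.
let color =
    AttachmentImage::new(&memory_allocator, [1920, 1080], Format::R8G8B8A8_UNORM).unwrap();
let depth = AttachmentImage::new(&memory_allocator, [1920, 1080], Format::D16_UNORM).unwrap();
```
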
pub fn new_with_exportable_fd(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: [u32; 2],
|
||||
array_layers: u32,
|
||||
format: Format,
|
||||
base_usage: ImageUsage,
|
||||
samples: SampleCount,
|
||||
) -> Result<Arc<AttachmentImage>, ImageCreationError> {
|
||||
let physical_device = device.physical_device();
|
||||
let physical_device = allocator.device().physical_device();
|
||||
let device_properties = physical_device.properties();
|
||||
|
||||
if dimensions[0] > device_properties.max_framebuffer_height {
|
||||
@ -490,9 +501,37 @@ impl AttachmentImage {
|
||||
|
||||
let aspects = format.aspects();
|
||||
let is_depth = aspects.depth || aspects.stencil;
|
||||
let usage = ImageUsage {
|
||||
color_attachment: !is_depth,
|
||||
depth_stencil_attachment: is_depth,
|
||||
..base_usage
|
||||
};
|
||||
|
||||
let external_memory_properties = allocator
|
||||
.device()
|
||||
.physical_device()
|
||||
.image_format_properties(ImageFormatInfo {
|
||||
format: Some(format),
|
||||
usage,
|
||||
external_memory_handle_type: Some(ExternalMemoryHandleType::OpaqueFd),
|
||||
mutable_format: true,
|
||||
..Default::default()
|
||||
})
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.external_memory_properties;
|
||||
// VUID-VkExportMemoryAllocateInfo-handleTypes-00656
|
||||
assert!(external_memory_properties.exportable);
|
||||
|
||||
// VUID-VkMemoryAllocateInfo-pNext-00639
|
||||
// Guaranteed because we always create a dedicated allocation
|
||||
|
||||
let external_memory_handle_types = ExternalMemoryHandleTypes {
|
||||
opaque_fd: true,
|
||||
..ExternalMemoryHandleTypes::empty()
|
||||
};
|
||||
let image = UnsafeImage::new(
|
||||
device.clone(),
|
||||
allocator.device().clone(),
|
||||
UnsafeImageCreateInfo {
|
||||
dimensions: ImageDimensions::Dim2d {
|
||||
width: dimensions[0],
|
||||
@ -501,51 +540,43 @@ impl AttachmentImage {
|
||||
},
|
||||
format: Some(format),
|
||||
samples,
|
||||
usage: ImageUsage {
|
||||
color_attachment: !is_depth,
|
||||
depth_stencil_attachment: is_depth,
|
||||
..base_usage
|
||||
},
|
||||
external_memory_handle_types: ExternalMemoryHandleTypes {
|
||||
opaque_fd: true,
|
||||
..ExternalMemoryHandleTypes::empty()
|
||||
},
|
||||
usage,
|
||||
external_memory_handle_types,
|
||||
mutable_format: true,
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
let requirements = image.memory_requirements();
|
||||
let memory_type_index = allocator
|
||||
.find_memory_type_index(requirements.memory_type_bits, MemoryUsage::GpuOnly.into())
|
||||
.expect("failed to find a suitable memory type");
|
||||
|
||||
let mem_reqs = image.memory_requirements();
|
||||
let memory = alloc_dedicated_with_exportable_fd(
|
||||
device.clone(),
|
||||
&mem_reqs,
|
||||
AllocLayout::Optimal,
|
||||
MappingRequirement::DoNotMap,
|
||||
DedicatedAllocation::Image(&image),
|
||||
|t| {
|
||||
if t.property_flags.device_local {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
},
|
||||
)?;
|
||||
match unsafe {
|
||||
allocator.allocate_dedicated_unchecked(
|
||||
memory_type_index,
|
||||
requirements.size,
|
||||
Some(DedicatedAllocation::Image(&image)),
|
||||
external_memory_handle_types,
|
||||
)
|
||||
} {
|
||||
Ok(alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
unsafe { image.bind_memory(alloc.device_memory(), alloc.offset()) }?;
|
||||
|
||||
debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe {
|
||||
image.bind_memory(memory.memory(), memory.offset())?;
|
||||
Ok(Arc::new(AttachmentImage {
|
||||
image,
|
||||
memory: alloc,
|
||||
attachment_layout: if is_depth {
|
||||
ImageLayout::DepthStencilAttachmentOptimal
|
||||
} else {
|
||||
ImageLayout::ColorAttachmentOptimal
|
||||
},
|
||||
initialized: AtomicBool::new(false),
|
||||
}))
|
||||
}
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
|
||||
Ok(Arc::new(AttachmentImage {
|
||||
image,
|
||||
memory,
|
||||
attachment_layout: if is_depth {
|
||||
ImageLayout::DepthStencilAttachmentOptimal
|
||||
} else {
|
||||
ImageLayout::ColorAttachmentOptimal
|
||||
},
|
||||
initialized: AtomicBool::new(false),
|
||||
}))
|
||||
}
|
||||
|
||||
     /// Exports posix file descriptor for the allocated memory.
@ -553,21 +584,19 @@
     #[inline]
     pub fn export_posix_fd(&self) -> Result<File, DeviceMemoryError> {
         self.memory
-            .memory()
+            .device_memory()
             .export_fd(ExternalMemoryHandleType::OpaqueFd)
     }

     /// Return the size of the allocated memory (used e.g. with cuda).
     #[inline]
     pub fn mem_size(&self) -> DeviceSize {
-        self.memory.memory().allocation_size()
+        self.memory.device_memory().allocation_size()
     }
 }

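A sketch of the external-memory path these two accessors support (for example CUDA importing the image). `new_with_exportable_fd` takes the same allocator as the other constructors, and the extension and platform requirements noted for `raw_with_exportable_fd` apply here as well; the concrete format, usage, and dimensions are assumptions.

```rust
use vulkano::{
    format::Format,
    image::{AttachmentImage, ImageUsage, SampleCount},
};

let image = AttachmentImage::new_with_exportable_fd(
    &memory_allocator,
    [1024, 1024],
    1, // array_layers
    Format::R16G16B16A16_SFLOAT,
    ImageUsage {
        sampled: true,
        ..ImageUsage::empty()
    },
    SampleCount::Sample1,
)
.unwrap();

let fd = image.export_posix_fd().unwrap();
let size = image.mem_size(); // e.g. passed along with the fd to cudaImportExternalMemory
```
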
unsafe impl<A> ImageAccess for AttachmentImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
unsafe impl ImageAccess for AttachmentImage {
|
||||
#[inline]
|
||||
fn inner(&self) -> ImageInner<'_> {
|
||||
ImageInner {
|
||||
image: &self.image,
|
||||
@ -578,14 +607,17 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn initial_layout_requirement(&self) -> ImageLayout {
|
||||
self.attachment_layout
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn final_layout_requirement(&self) -> ImageLayout {
|
||||
self.attachment_layout
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn descriptor_layouts(&self) -> Option<ImageDescriptorLayouts> {
|
||||
Some(ImageDescriptorLayouts {
|
||||
storage_image: ImageLayout::General,
|
||||
@ -595,45 +627,40 @@ where
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
unsafe fn layout_initialized(&self) {
|
||||
self.initialized.store(true, Ordering::SeqCst);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_layout_initialized(&self) -> bool {
|
||||
self.initialized.load(Ordering::SeqCst)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> DeviceOwned for AttachmentImage<A> {
|
||||
unsafe impl DeviceOwned for AttachmentImage {
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.image.device()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<P, A> ImageContent<P> for AttachmentImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
unsafe impl<P> ImageContent<P> for AttachmentImage {
|
||||
fn matches_format(&self) -> bool {
|
||||
true // FIXME:
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> PartialEq for AttachmentImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
impl PartialEq for AttachmentImage {
|
||||
#[inline]
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner()
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> Eq for AttachmentImage<A> where A: MemoryPoolAlloc {}
|
||||
impl Eq for AttachmentImage {}
|
||||
|
||||
impl<A> Hash for AttachmentImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
impl Hash for AttachmentImage {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
}
|
||||
@ -641,24 +668,29 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::AttachmentImage;
|
||||
use crate::format::Format;
|
||||
use super::*;
|
||||
use crate::memory::allocator::StandardMemoryAllocator;
|
||||
|
||||
#[test]
|
||||
fn create_regular() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let _img = AttachmentImage::new(device, [32, 32], Format::R8G8B8A8_UNORM).unwrap();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let _img =
|
||||
AttachmentImage::new(&memory_allocator, [32, 32], Format::R8G8B8A8_UNORM).unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn create_transient() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let _img = AttachmentImage::transient(device, [32, 32], Format::R8G8B8A8_UNORM).unwrap();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let _img = AttachmentImage::transient(&memory_allocator, [32, 32], Format::R8G8B8A8_UNORM)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn d16_unorm_always_supported() {
|
||||
let (device, _) = gfx_dev_and_queue!();
|
||||
let _img = AttachmentImage::new(device, [32, 32], Format::D16_UNORM).unwrap();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let _img = AttachmentImage::new(&memory_allocator, [32, 32], Format::D16_UNORM).unwrap();
|
||||
}
|
||||
}
|
||||
|
@ -22,11 +22,11 @@ use crate::{
|
||||
format::Format,
|
||||
image::sys::UnsafeImageCreateInfo,
|
||||
memory::{
|
||||
pool::{
|
||||
AllocFromRequirementsFilter, AllocLayout, MappingRequirement, MemoryPoolAlloc,
|
||||
PotentialDedicatedAllocation, StandardMemoryPoolAlloc,
|
||||
allocator::{
|
||||
AllocationCreateInfo, AllocationCreationError, AllocationType, MemoryAlloc,
|
||||
MemoryAllocatePreference, MemoryAllocator, MemoryUsage,
|
||||
},
|
||||
DedicatedAllocation, DeviceMemoryError, MemoryPool,
|
||||
DedicatedAllocation,
|
||||
},
|
||||
sampler::Filter,
|
||||
sync::Sharing,
|
||||
@ -44,10 +44,10 @@ use std::{
|
||||
/// but then you must only ever read from it.
|
||||
// TODO: type (2D, 3D, array, etc.) as template parameter
|
||||
#[derive(Debug)]
|
||||
pub struct ImmutableImage<A = PotentialDedicatedAllocation<StandardMemoryPoolAlloc>> {
|
||||
pub struct ImmutableImage {
|
||||
image: Arc<UnsafeImage>,
|
||||
dimensions: ImageDimensions,
|
||||
_memory: A,
|
||||
_memory: MemoryAlloc,
|
||||
layout: ImageLayout,
|
||||
}
|
||||
|
||||
@ -105,7 +105,7 @@ impl ImmutableImage {
|
||||
/// Returns two things: the image, and a special access that should be used for the initial
|
||||
/// upload to the image.
|
||||
pub fn uninitialized(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: ImageDimensions,
|
||||
format: Format,
|
||||
mip_levels: impl Into<MipmapsCount>,
|
||||
@ -118,7 +118,7 @@ impl ImmutableImage {
|
||||
let queue_family_indices: SmallVec<[_; 4]> = queue_family_indices.into_iter().collect();
|
||||
|
||||
let image = UnsafeImage::new(
|
||||
device.clone(),
|
||||
allocator.device().clone(),
|
||||
UnsafeImageCreateInfo {
|
||||
dimensions,
|
||||
format: Some(format),
|
||||
@ -140,39 +140,37 @@ impl ImmutableImage {
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
let requirements = image.memory_requirements();
|
||||
let create_info = AllocationCreateInfo {
|
||||
requirements,
|
||||
allocation_type: AllocationType::NonLinear,
|
||||
usage: MemoryUsage::GpuOnly,
|
||||
allocate_preference: MemoryAllocatePreference::Unknown,
|
||||
dedicated_allocation: Some(DedicatedAllocation::Image(&image)),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mem_reqs = image.memory_requirements();
|
||||
let memory = MemoryPool::alloc_from_requirements(
|
||||
&device.standard_memory_pool(),
|
||||
&mem_reqs,
|
||||
AllocLayout::Optimal,
|
||||
MappingRequirement::DoNotMap,
|
||||
Some(DedicatedAllocation::Image(&image)),
|
||||
|t| {
|
||||
if t.property_flags.device_local {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
},
|
||||
)?;
|
||||
debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe {
|
||||
image.bind_memory(memory.memory(), memory.offset())?;
|
||||
match unsafe { allocator.allocate_unchecked(create_info) } {
|
||||
Ok(alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
unsafe { image.bind_memory(alloc.device_memory(), alloc.offset()) }?;
|
||||
|
||||
let image = Arc::new(ImmutableImage {
|
||||
image,
|
||||
_memory: alloc,
|
||||
dimensions,
|
||||
layout,
|
||||
});
|
||||
|
||||
let init = Arc::new(ImmutableImageInitialization {
|
||||
image: image.clone(),
|
||||
});
|
||||
|
||||
Ok((image, init))
|
||||
}
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
|
||||
let image = Arc::new(ImmutableImage {
|
||||
image,
|
||||
_memory: memory,
|
||||
dimensions,
|
||||
layout,
|
||||
});
|
||||
|
||||
let init = Arc::new(ImmutableImageInitialization {
|
||||
image: image.clone(),
|
||||
});
|
||||
|
||||
Ok((image, init))
|
||||
}
|
||||
|
||||
/// Construct an ImmutableImage from the contents of `iter`.
|
||||
@ -181,6 +179,7 @@ impl ImmutableImage {
|
||||
/// `iter` to it, then calling [`from_buffer`](ImmutableImage::from_buffer) to copy the data
|
||||
/// over.
|
||||
pub fn from_iter<Px, I, L, A>(
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
iter: I,
|
||||
dimensions: ImageDimensions,
|
||||
mip_levels: MipmapsCount,
|
||||
@ -194,7 +193,7 @@ impl ImmutableImage {
|
||||
A: CommandBufferAllocator,
|
||||
{
|
||||
let source = CpuAccessibleBuffer::from_iter(
|
||||
command_buffer_builder.device().clone(),
|
||||
allocator,
|
||||
BufferUsage {
|
||||
transfer_src: true,
|
||||
..BufferUsage::empty()
|
||||
@ -202,7 +201,9 @@ impl ImmutableImage {
|
||||
false,
|
||||
iter,
|
||||
)?;
|
||||
|
||||
ImmutableImage::from_buffer(
|
||||
allocator,
|
||||
source,
|
||||
dimensions,
|
||||
mip_levels,
|
||||
@ -221,6 +222,7 @@ impl ImmutableImage {
|
||||
/// `command_buffer_builder` can then be used to record other commands, built, and executed as
|
||||
/// normal. If it is not executed, the image contents will be left undefined.
|
||||
pub fn from_buffer<L, A>(
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
source: Arc<dyn BufferAccess>,
|
||||
dimensions: ImageDimensions,
|
||||
mip_levels: MipmapsCount,
|
||||
@ -258,7 +260,7 @@ impl ImmutableImage {
|
||||
let layout = ImageLayout::ShaderReadOnlyOptimal;
|
||||
|
||||
let (image, initializer) = ImmutableImage::uninitialized(
|
||||
source.device().clone(),
|
||||
allocator,
|
||||
dimensions,
|
||||
format,
|
||||
mip_levels,
|
||||
@ -292,16 +294,15 @@ impl ImmutableImage {
|
||||
}
|
||||
}
|
||||
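A sketch (not from the commit) of the typical texture-upload call after the change. It assumes the trailing arguments keep their previous order (`format`, then the command buffer builder) beyond what is visible in the hunk above, and that `cbb` is a primary `AutoCommandBufferBuilder` whose later execution performs the copy.

```rust
use vulkano::{
    format::Format,
    image::{ImageDimensions, ImmutableImage, MipmapsCount},
};

let image = ImmutableImage::from_iter(
    &memory_allocator,
    (0..64 * 64 * 4).map(|_| 0u8), // dummy RGBA pixel data
    ImageDimensions::Dim2d {
        width: 64,
        height: 64,
        array_layers: 1,
    },
    MipmapsCount::One,
    Format::R8G8B8A8_SRGB,
    &mut cbb,
)
.unwrap();
```
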
|
||||
unsafe impl<A> DeviceOwned for ImmutableImage<A> {
|
||||
unsafe impl DeviceOwned for ImmutableImage {
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.image.device()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> ImageAccess for ImmutableImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
unsafe impl ImageAccess for ImmutableImage {
|
||||
#[inline]
|
||||
fn inner(&self) -> ImageInner<'_> {
|
||||
ImageInner {
|
||||
image: &self.image,
|
||||
@ -312,18 +313,22 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn is_layout_initialized(&self) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn initial_layout_requirement(&self) -> ImageLayout {
|
||||
self.layout
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn final_layout_requirement(&self) -> ImageLayout {
|
||||
self.layout
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn descriptor_layouts(&self) -> Option<ImageDescriptorLayouts> {
|
||||
Some(ImageDescriptorLayouts {
|
||||
storage_image: ImageLayout::General,
|
||||
@ -334,82 +339,71 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<P, A> ImageContent<P> for ImmutableImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
unsafe impl<P> ImageContent<P> for ImmutableImage {
|
||||
fn matches_format(&self) -> bool {
|
||||
true // FIXME:
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> PartialEq for ImmutableImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
impl PartialEq for ImmutableImage {
|
||||
#[inline]
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner()
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> Eq for ImmutableImage<A> where A: MemoryPoolAlloc {}
|
||||
impl Eq for ImmutableImage {}
|
||||
|
||||
impl<A> Hash for ImmutableImage<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
impl Hash for ImmutableImage {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
}
|
||||
}
|
||||
|
||||
// Must not implement Clone, as that would lead to multiple `used` values.
|
||||
pub struct ImmutableImageInitialization<A = PotentialDedicatedAllocation<StandardMemoryPoolAlloc>> {
|
||||
image: Arc<ImmutableImage<A>>,
|
||||
pub struct ImmutableImageInitialization {
|
||||
image: Arc<ImmutableImage>,
|
||||
}
|
||||
|
||||
unsafe impl<A> DeviceOwned for ImmutableImageInitialization<A> {
|
||||
unsafe impl DeviceOwned for ImmutableImageInitialization {
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.image.device()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> ImageAccess for ImmutableImageInitialization<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
unsafe impl ImageAccess for ImmutableImageInitialization {
|
||||
#[inline]
|
||||
fn inner(&self) -> ImageInner<'_> {
|
||||
self.image.inner()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn initial_layout_requirement(&self) -> ImageLayout {
|
||||
ImageLayout::Undefined
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn final_layout_requirement(&self) -> ImageLayout {
|
||||
self.image.layout
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn descriptor_layouts(&self) -> Option<ImageDescriptorLayouts> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> PartialEq for ImmutableImageInitialization<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
impl PartialEq for ImmutableImageInitialization {
|
||||
#[inline]
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner()
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> Eq for ImmutableImageInitialization<A> where A: MemoryPoolAlloc {}
|
||||
impl Eq for ImmutableImageInitialization {}
|
||||
|
||||
impl<A> Hash for ImmutableImageInitialization<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
impl Hash for ImmutableImageInitialization {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
}
|
||||
@ -419,7 +413,7 @@ where
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ImmutableImageCreationError {
|
||||
ImageCreationError(ImageCreationError),
|
||||
DeviceMemoryAllocationError(DeviceMemoryError),
|
||||
AllocError(AllocationCreationError),
|
||||
CommandBufferBeginError(CommandBufferBeginError),
|
||||
|
||||
/// The size of the provided source data is less than the required size for an image with the
|
||||
@ -434,7 +428,7 @@ impl Error for ImmutableImageCreationError {
|
||||
fn source(&self) -> Option<&(dyn Error + 'static)> {
|
||||
match self {
|
||||
Self::ImageCreationError(err) => Some(err),
|
||||
Self::DeviceMemoryAllocationError(err) => Some(err),
|
||||
Self::AllocError(err) => Some(err),
|
||||
Self::CommandBufferBeginError(err) => Some(err),
|
||||
_ => None,
|
||||
}
|
||||
@ -445,15 +439,15 @@ impl Display for ImmutableImageCreationError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
|
||||
match self {
|
||||
Self::ImageCreationError(err) => err.fmt(f),
|
||||
Self::DeviceMemoryAllocationError(err) => err.fmt(f),
|
||||
Self::AllocError(err) => err.fmt(f),
|
||||
Self::CommandBufferBeginError(err) => err.fmt(f),
|
||||
|
||||
Self::SourceTooSmall {
|
||||
source_size,
|
||||
required_size,
|
||||
} => write!(
|
||||
f,
|
||||
"the size of the provided source data ({} bytes) is less than the required size for an image of the given format and dimensions ({} bytes)",
|
||||
"the size of the provided source data ({} bytes) is less than the required size \
|
||||
for an image of the given format and dimensions ({} bytes)",
|
||||
source_size, required_size,
|
||||
),
|
||||
}
|
||||
@ -466,15 +460,15 @@ impl From<ImageCreationError> for ImmutableImageCreationError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<DeviceMemoryError> for ImmutableImageCreationError {
|
||||
fn from(err: DeviceMemoryError) -> Self {
|
||||
Self::DeviceMemoryAllocationError(err)
|
||||
impl From<AllocationCreationError> for ImmutableImageCreationError {
|
||||
fn from(err: AllocationCreationError) -> Self {
|
||||
Self::AllocError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<OomError> for ImmutableImageCreationError {
|
||||
fn from(err: OomError) -> Self {
|
||||
Self::DeviceMemoryAllocationError(err.into())
|
||||
Self::AllocError(err.into())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -915,6 +915,7 @@ mod tests {
|
||||
},
|
||||
format::Format,
|
||||
image::{ImageAccess, ImageDimensions, ImmutableImage, MipmapsCount},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
};
|
||||
|
||||
#[test]
|
||||
@ -1021,14 +1022,15 @@ mod tests {
|
||||
fn mipmap_working_immutable_image() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
|
||||
let command_buffer_allocator = StandardCommandBufferAllocator::new(device);
|
||||
let mut command_buffer_builder = AutoCommandBufferBuilder::primary(
|
||||
&command_buffer_allocator,
|
||||
let cb_allocator = StandardCommandBufferAllocator::new(device.clone());
|
||||
let mut cbb = AutoCommandBufferBuilder::primary(
|
||||
&cb_allocator,
|
||||
queue.queue_family_index(),
|
||||
CommandBufferUsage::OneTimeSubmit,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let dimensions = ImageDimensions::Dim2d {
|
||||
width: 512,
|
||||
height: 512,
|
||||
@ -1040,11 +1042,12 @@ mod tests {
|
||||
vec.resize(512 * 512, 0u8);
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
vec.into_iter(),
|
||||
dimensions,
|
||||
MipmapsCount::One,
|
||||
Format::R8_UNORM,
|
||||
&mut command_buffer_builder,
|
||||
&mut cbb,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(image.mip_levels(), 1);
|
||||
@ -1055,11 +1058,12 @@ mod tests {
|
||||
vec.resize(512 * 512, 0u8);
|
||||
|
||||
let image = ImmutableImage::from_iter(
|
||||
&memory_allocator,
|
||||
vec.into_iter(),
|
||||
dimensions,
|
||||
MipmapsCount::Log2,
|
||||
Format::R8_UNORM,
|
||||
&mut command_buffer_builder,
|
||||
&mut cbb,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(image.mip_levels(), 10);
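The test above also documents the new calling convention: `ImmutableImage::from_iter` now takes a reference to a `MemoryAllocator` instead of a `Device`. A condensed sketch of that call, assuming `device`, `queue` and a primary `AutoCommandBufferBuilder` named `cbb` are already set up as in the test:

use vulkano::{
    format::Format,
    image::{ImageDimensions, ImmutableImage, MipmapsCount},
    memory::allocator::StandardMemoryAllocator,
};

let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let pixels = vec![0u8; 512 * 512];

// The upload is recorded into `cbb`; the image contents are defined once the
// command buffer is built and executed.
let image = ImmutableImage::from_iter(
    &memory_allocator,
    pixels.into_iter(),
    ImageDimensions::Dim2d { width: 512, height: 512, array_layers: 1 },
    MipmapsCount::One,
    Format::R8_UNORM,
    &mut cbb,
)
.unwrap();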
|
||||
|
@ -14,14 +14,14 @@ use super::{
|
||||
use crate::{
|
||||
device::{Device, DeviceOwned, Queue},
|
||||
format::Format,
|
||||
image::{sys::UnsafeImageCreateInfo, view::ImageView},
|
||||
image::{sys::UnsafeImageCreateInfo, view::ImageView, ImageFormatInfo},
|
||||
memory::{
|
||||
pool::{
|
||||
alloc_dedicated_with_exportable_fd, AllocFromRequirementsFilter, AllocLayout,
|
||||
MappingRequirement, MemoryPoolAlloc, PotentialDedicatedAllocation, StandardMemoryPool,
|
||||
allocator::{
|
||||
AllocationCreateInfo, AllocationType, MemoryAlloc, MemoryAllocatePreference,
|
||||
MemoryAllocator, MemoryUsage,
|
||||
},
|
||||
DedicatedAllocation, DeviceMemoryError, ExternalMemoryHandleType,
|
||||
ExternalMemoryHandleTypes, MemoryPool,
|
||||
ExternalMemoryHandleTypes,
|
||||
},
|
||||
sync::Sharing,
|
||||
DeviceSize,
|
||||
@ -36,15 +36,12 @@ use std::{
|
||||
/// General-purpose image in device memory. Can be used for any usage, but will be slower than a
|
||||
/// specialized image.
|
||||
#[derive(Debug)]
|
||||
pub struct StorageImage<A = Arc<StandardMemoryPool>>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
pub struct StorageImage {
|
||||
// Inner implementation.
|
||||
image: Arc<UnsafeImage>,
|
||||
|
||||
// Memory used to back the image.
|
||||
memory: PotentialDedicatedAllocation<A::Alloc>,
|
||||
memory: MemoryAlloc,
|
||||
|
||||
// Dimensions of the image.
|
||||
dimensions: ImageDimensions,
|
||||
@ -53,7 +50,7 @@ where
|
||||
impl StorageImage {
|
||||
/// Creates a new image with the given dimensions and format.
|
||||
pub fn new(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: ImageDimensions,
|
||||
format: Format,
|
||||
queue_family_indices: impl IntoIterator<Item = u32>,
|
||||
@ -78,7 +75,7 @@ impl StorageImage {
|
||||
let flags = ImageCreateFlags::empty();
|
||||
|
||||
StorageImage::with_usage(
|
||||
device,
|
||||
allocator,
|
||||
dimensions,
|
||||
format,
|
||||
usage,
|
||||
@ -89,7 +86,7 @@ impl StorageImage {
|
||||
|
||||
/// Same as `new`, but allows specifying the usage.
|
||||
pub fn with_usage(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: ImageDimensions,
|
||||
format: Format,
|
||||
usage: ImageUsage,
|
||||
@ -99,7 +96,7 @@ impl StorageImage {
|
||||
let queue_family_indices: SmallVec<[_; 4]> = queue_family_indices.into_iter().collect();
|
||||
|
||||
let image = UnsafeImage::new(
|
||||
device.clone(),
|
||||
allocator.device().clone(),
|
||||
UnsafeImageCreateInfo {
|
||||
dimensions,
|
||||
format: Some(format),
|
||||
@ -116,36 +113,34 @@ impl StorageImage {
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
let requirements = image.memory_requirements();
|
||||
let create_info = AllocationCreateInfo {
|
||||
requirements,
|
||||
allocation_type: AllocationType::NonLinear,
|
||||
usage: MemoryUsage::GpuOnly,
|
||||
allocate_preference: MemoryAllocatePreference::Unknown,
|
||||
dedicated_allocation: Some(DedicatedAllocation::Image(&image)),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let mem_reqs = image.memory_requirements();
|
||||
let memory = MemoryPool::alloc_from_requirements(
|
||||
&device.standard_memory_pool(),
|
||||
&mem_reqs,
|
||||
AllocLayout::Optimal,
|
||||
MappingRequirement::DoNotMap,
|
||||
Some(DedicatedAllocation::Image(&image)),
|
||||
|t| {
|
||||
if t.property_flags.device_local {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
},
|
||||
)?;
|
||||
debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe {
|
||||
image.bind_memory(memory.memory(), memory.offset())?;
|
||||
match unsafe { allocator.allocate_unchecked(create_info) } {
|
||||
Ok(alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
unsafe { image.bind_memory(alloc.device_memory(), alloc.offset()) }?;
|
||||
|
||||
Ok(Arc::new(StorageImage {
|
||||
image,
|
||||
memory: alloc,
|
||||
dimensions,
|
||||
}))
|
||||
}
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
|
||||
Ok(Arc::new(StorageImage {
|
||||
image,
|
||||
memory,
|
||||
dimensions,
|
||||
}))
|
||||
}
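The hunk above replaces the old `MemoryPool::alloc_from_requirements` call with the new allocator flow. Condensed, the pattern is: build an `AllocationCreateInfo` from the image's `MemoryRequirements`, ask the allocator for memory, then bind it. This is a sketch extracted from `with_usage` for readability, not a standalone function; the error conversion done by the surrounding `match` is elided.

// Inside `with_usage`, after `UnsafeImage::new` has succeeded:
let requirements = image.memory_requirements();
let create_info = AllocationCreateInfo {
    requirements,
    // Optimal-tiling images are non-linear resources.
    allocation_type: AllocationType::NonLinear,
    usage: MemoryUsage::GpuOnly,
    allocate_preference: MemoryAllocatePreference::Unknown,
    // Hint that a dedicated allocation may be used for this image.
    dedicated_allocation: Some(DedicatedAllocation::Image(&image)),
    ..Default::default()
};

let alloc = unsafe { allocator.allocate_unchecked(create_info) }?;
debug_assert!(alloc.offset() % requirements.alignment == 0);
debug_assert!(alloc.size() == requirements.size);
// Binding suballocated memory is unsafe: the offset and size invariants above
// must hold for the memory to be valid for this image.
unsafe { image.bind_memory(alloc.device_memory(), alloc.offset()) }?;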
|
||||
|
||||
pub fn new_with_exportable_fd(
|
||||
device: Arc<Device>,
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
dimensions: ImageDimensions,
|
||||
format: Format,
|
||||
usage: ImageUsage,
|
||||
@ -154,8 +149,35 @@ impl StorageImage {
|
||||
) -> Result<Arc<StorageImage>, ImageCreationError> {
|
||||
let queue_family_indices: SmallVec<[_; 4]> = queue_family_indices.into_iter().collect();
|
||||
|
||||
let external_memory_properties = allocator
|
||||
.device()
|
||||
.physical_device()
|
||||
.image_format_properties(ImageFormatInfo {
|
||||
format: Some(format),
|
||||
image_type: dimensions.image_type(),
|
||||
usage,
|
||||
external_memory_handle_type: Some(ExternalMemoryHandleType::OpaqueFd),
|
||||
mutable_format: flags.mutable_format,
|
||||
cube_compatible: flags.cube_compatible,
|
||||
array_2d_compatible: flags.array_2d_compatible,
|
||||
block_texel_view_compatible: flags.block_texel_view_compatible,
|
||||
..Default::default()
|
||||
})
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.external_memory_properties;
|
||||
// VUID-VkExportMemoryAllocateInfo-handleTypes-00656
|
||||
assert!(external_memory_properties.exportable);
|
||||
|
||||
// VUID-VkMemoryAllocateInfo-pNext-00639
|
||||
// Guaranteed because we always create a dedicated allocation
|
||||
|
||||
let external_memory_handle_types = ExternalMemoryHandleTypes {
|
||||
opaque_fd: true,
|
||||
..ExternalMemoryHandleTypes::empty()
|
||||
};
|
||||
let image = UnsafeImage::new(
|
||||
device.clone(),
|
||||
allocator.device().clone(),
|
||||
UnsafeImageCreateInfo {
|
||||
dimensions,
|
||||
format: Some(format),
|
||||
@ -165,10 +187,7 @@ impl StorageImage {
|
||||
} else {
|
||||
Sharing::Exclusive
|
||||
},
|
||||
external_memory_handle_types: ExternalMemoryHandleTypes {
|
||||
opaque_fd: true,
|
||||
..ExternalMemoryHandleTypes::empty()
|
||||
},
|
||||
external_memory_handle_types,
|
||||
mutable_format: flags.mutable_format,
|
||||
cube_compatible: flags.cube_compatible,
|
||||
array_2d_compatible: flags.array_2d_compatible,
|
||||
@ -176,37 +195,38 @@ impl StorageImage {
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
let requirements = image.memory_requirements();
|
||||
let memory_type_index = allocator
|
||||
.find_memory_type_index(requirements.memory_type_bits, MemoryUsage::GpuOnly.into())
|
||||
.expect("failed to find a suitable memory type");
|
||||
|
||||
let mem_reqs = image.memory_requirements();
|
||||
let memory = alloc_dedicated_with_exportable_fd(
|
||||
device,
|
||||
&mem_reqs,
|
||||
AllocLayout::Optimal,
|
||||
MappingRequirement::DoNotMap,
|
||||
DedicatedAllocation::Image(&image),
|
||||
|t| {
|
||||
if t.property_flags.device_local {
|
||||
AllocFromRequirementsFilter::Preferred
|
||||
} else {
|
||||
AllocFromRequirementsFilter::Allowed
|
||||
}
|
||||
},
|
||||
)?;
|
||||
debug_assert!((memory.offset() % mem_reqs.alignment) == 0);
|
||||
unsafe {
|
||||
image.bind_memory(memory.memory(), memory.offset())?;
|
||||
match unsafe {
|
||||
allocator.allocate_dedicated_unchecked(
|
||||
memory_type_index,
|
||||
requirements.size,
|
||||
Some(DedicatedAllocation::Image(&image)),
|
||||
external_memory_handle_types,
|
||||
)
|
||||
} {
|
||||
Ok(alloc) => {
|
||||
debug_assert!(alloc.offset() % requirements.alignment == 0);
|
||||
debug_assert!(alloc.size() == requirements.size);
|
||||
unsafe { image.bind_memory(alloc.device_memory(), alloc.offset()) }?;
|
||||
|
||||
Ok(Arc::new(StorageImage {
|
||||
image,
|
||||
memory: alloc,
|
||||
dimensions,
|
||||
}))
|
||||
}
|
||||
Err(err) => Err(err.into()),
|
||||
}
|
||||
|
||||
Ok(Arc::new(StorageImage {
|
||||
image,
|
||||
memory,
|
||||
dimensions,
|
||||
}))
|
||||
}
|
||||
|
||||
/// Allows the creation of a simple 2D general purpose image view from `StorageImage`.
|
||||
#[inline]
|
||||
pub fn general_purpose_image_view(
|
||||
allocator: &(impl MemoryAllocator + ?Sized),
|
||||
queue: Arc<Queue>,
|
||||
size: [u32; 2],
|
||||
format: Format,
|
||||
@ -219,7 +239,7 @@ impl StorageImage {
|
||||
};
|
||||
let flags = ImageCreateFlags::empty();
|
||||
let image_result = StorageImage::with_usage(
|
||||
queue.device().clone(),
|
||||
allocator,
|
||||
dims,
|
||||
format,
|
||||
usage,
|
||||
@ -244,30 +264,26 @@ impl StorageImage {
|
||||
#[inline]
|
||||
pub fn export_posix_fd(&self) -> Result<File, DeviceMemoryError> {
|
||||
self.memory
|
||||
.memory()
|
||||
.device_memory()
|
||||
.export_fd(ExternalMemoryHandleType::OpaqueFd)
|
||||
}
|
||||
|
||||
/// Return the size of the allocated memory (used e.g. with cuda).
|
||||
#[inline]
|
||||
pub fn mem_size(&self) -> DeviceSize {
|
||||
self.memory.memory().allocation_size()
|
||||
self.memory.device_memory().allocation_size()
|
||||
}
|
||||
}
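`export_posix_fd` and `mem_size` keep their shape but now go through `MemoryAlloc::device_memory()`. A hedged usage sketch, assuming `image` is a `StorageImage` created through `new_with_exportable_fd` on a device with the `khr_external_memory` and `khr_external_memory_fd` extensions enabled:

// Because the exportable path always makes a dedicated allocation, the exported
// file descriptor corresponds to exactly this image's memory.
let fd: std::fs::File = image.export_posix_fd().unwrap();
let size_in_bytes = image.mem_size(); // e.g. handed to an external API such as CUDA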
|
||||
|
||||
unsafe impl<A> DeviceOwned for StorageImage<A>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
unsafe impl DeviceOwned for StorageImage {
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
self.image.device()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<A> ImageAccess for StorageImage<A>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
unsafe impl ImageAccess for StorageImage {
|
||||
#[inline]
|
||||
fn inner(&self) -> ImageInner<'_> {
|
||||
ImageInner {
|
||||
image: &self.image,
|
||||
@ -278,14 +294,17 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn initial_layout_requirement(&self) -> ImageLayout {
|
||||
ImageLayout::General
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn final_layout_requirement(&self) -> ImageLayout {
|
||||
ImageLayout::General
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn descriptor_layouts(&self) -> Option<ImageDescriptorLayouts> {
|
||||
Some(ImageDescriptorLayouts {
|
||||
storage_image: ImageLayout::General,
|
||||
@ -296,30 +315,22 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<P, A> ImageContent<P> for StorageImage<A>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
unsafe impl<P> ImageContent<P> for StorageImage {
|
||||
fn matches_format(&self) -> bool {
|
||||
true // FIXME:
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> PartialEq for StorageImage<A>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
impl PartialEq for StorageImage {
|
||||
#[inline]
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.inner() == other.inner()
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> Eq for StorageImage<A> where A: MemoryPool {}
|
||||
impl Eq for StorageImage {}
|
||||
|
||||
impl<A> Hash for StorageImage<A>
|
||||
where
|
||||
A: MemoryPool,
|
||||
{
|
||||
impl Hash for StorageImage {
|
||||
fn hash<H: Hasher>(&self, state: &mut H) {
|
||||
self.inner().hash(state);
|
||||
}
|
||||
@ -327,20 +338,15 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::StorageImage;
|
||||
use crate::{
|
||||
format::Format,
|
||||
image::{
|
||||
view::ImageViewCreationError, ImageAccess, ImageCreationError, ImageDimensions,
|
||||
ImageUsage,
|
||||
},
|
||||
};
|
||||
use super::*;
|
||||
use crate::{image::view::ImageViewCreationError, memory::allocator::StandardMemoryAllocator};
|
||||
|
||||
#[test]
|
||||
fn create() {
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let _img = StorageImage::new(
|
||||
device,
|
||||
&memory_allocator,
|
||||
ImageDimensions::Dim2d {
|
||||
width: 32,
|
||||
height: 32,
|
||||
@ -354,7 +360,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn create_general_purpose_image_view() {
|
||||
let (_device, queue) = gfx_dev_and_queue!();
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let usage = ImageUsage {
|
||||
transfer_src: true,
|
||||
transfer_dst: true,
|
||||
@ -362,6 +369,7 @@ mod tests {
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
let img_view = StorageImage::general_purpose_image_view(
|
||||
&memory_allocator,
|
||||
queue,
|
||||
[32, 32],
|
||||
Format::R8G8B8A8_UNORM,
|
||||
@ -373,13 +381,15 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn create_general_purpose_image_view_failed() {
|
||||
let (_device, queue) = gfx_dev_and_queue!();
|
||||
let (device, queue) = gfx_dev_and_queue!();
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
// Not valid for image view...
|
||||
let usage = ImageUsage {
|
||||
transfer_src: true,
|
||||
..ImageUsage::empty()
|
||||
};
|
||||
let img_result = StorageImage::general_purpose_image_view(
|
||||
&memory_allocator,
|
||||
queue,
|
||||
[32, 32],
|
||||
Format::R8G8B8A8_UNORM,
|
||||
|
@ -27,8 +27,8 @@ use crate::{
        SparseImageFormatProperties,
    },
    memory::{
        DeviceMemory, DeviceMemoryError, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
        MemoryRequirements,
        allocator::AllocationCreationError, DeviceMemory, ExternalMemoryHandleType,
        ExternalMemoryHandleTypes, MemoryRequirements,
    },
    range_map::RangeMap,
    sync::{AccessError, CurrentAccess, Sharing},
@ -1917,11 +1917,11 @@ impl Default for UnsafeImageCreateInfo {
    }
}

/// Error that can happen when creating an instance.
/// Error that can happen when creating an image.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ImageCreationError {
    /// Allocating memory failed.
    AllocError(DeviceMemoryError),
    AllocError(AllocationCreationError),

    RequirementNotMet {
        required_for: &'static str,
@ -2174,12 +2174,12 @@ impl Display for ImageCreationError {

impl From<OomError> for ImageCreationError {
    fn from(err: OomError) -> Self {
        Self::AllocError(DeviceMemoryError::OomError(err))
        Self::AllocError(err.into())
    }
}

impl From<DeviceMemoryError> for ImageCreationError {
    fn from(err: DeviceMemoryError) -> Self {
impl From<AllocationCreationError> for ImageCreationError {
    fn from(err: AllocationCreationError) -> Self {
        Self::AllocError(err)
    }
}
@ -2187,8 +2187,12 @@ impl From<DeviceMemoryError> for ImageCreationError {
impl From<VulkanError> for ImageCreationError {
    fn from(err: VulkanError) -> Self {
        match err {
            err @ VulkanError::OutOfHostMemory => Self::AllocError(err.into()),
            err @ VulkanError::OutOfDeviceMemory => Self::AllocError(err.into()),
            VulkanError::OutOfHostMemory => {
                Self::AllocError(AllocationCreationError::OutOfHostMemory)
            }
            VulkanError::OutOfDeviceMemory => {
                Self::AllocError(AllocationCreationError::OutOfDeviceMemory)
            }
            _ => panic!("unexpected error: {:?}", err),
        }
    }
|
||||
|
vulkano/src/memory/allocator/mod.rs: new file, 1593 lines (diff suppressed because it is too large)
vulkano/src/memory/allocator/suballocator.rs: new file, 3090 lines (diff suppressed because it is too large)
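Because the diffs of the two new allocator files are suppressed, the following is a sketch of the allocator-side items that the updated call sites below actually rely on; the names are taken from this commit's import hunks, not from the suppressed files themselves.

// Items referenced by the updated buffer/image code in this commit; all of them
// live under vulkano::memory::allocator in the new module layout.
use vulkano::memory::allocator::{
    AllocationCreateInfo, AllocationCreationError, AllocationType, MemoryAlloc,
    MemoryAllocatePreference, MemoryAllocator, MemoryUsage, StandardMemoryAllocator,
};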
@ -92,13 +92,9 @@
//! get memory from that pool. By default if you don't specify any pool when creating a buffer or
//! an image, an instance of `StandardMemoryPool` that is shared by the `Device` object is used.

pub use self::{
    device_memory::{
        DeviceMemory, DeviceMemoryError, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
        MappedDeviceMemory, MemoryAllocateFlags, MemoryAllocateInfo, MemoryImportInfo,
        MemoryMapError,
    },
    pool::MemoryPool,
pub use self::device_memory::{
    DeviceMemory, DeviceMemoryError, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
    MappedDeviceMemory, MemoryAllocateFlags, MemoryAllocateInfo, MemoryImportInfo, MemoryMapError,
};
use crate::{
    buffer::{sys::UnsafeBuffer, BufferAccess},
@ -109,8 +105,8 @@ use crate::{
};
use std::sync::Arc;

pub mod allocator;
mod device_memory;
pub mod pool;

/// Properties of the memory in a physical device.
#[derive(Clone, Debug)]
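After this reorganisation the allocator lives under `vulkano::memory::allocator` while `memory::pool` is removed. A minimal caller-side sketch of constructing the standard allocator, using only names that appear in this commit's call sites; wrapping it in an `Arc` is optional and shown here as one common pattern.

use std::sync::Arc;
use vulkano::memory::allocator::StandardMemoryAllocator;

// `device` is the Arc<Device> the rest of the application already holds.
// The allocator is then passed by reference (`&memory_allocator`) to the
// buffer and image constructors updated in this commit.
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));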
|
||||
|
@ -1,181 +0,0 @@
|
||||
// Copyright (c) 2016 The vulkano developers
|
||||
// Licensed under the Apache License, Version 2.0
|
||||
// <LICENSE-APACHE or
|
||||
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
|
||||
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
|
||||
// at your option. All files in the project carrying such
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use crate::{
|
||||
device::Device,
|
||||
memory::{
|
||||
device_memory::MemoryAllocateInfo, DeviceMemory, DeviceMemoryError, MappedDeviceMemory,
|
||||
},
|
||||
DeviceSize,
|
||||
};
|
||||
use parking_lot::Mutex;
|
||||
use std::{cmp, ops::Range, sync::Arc};
|
||||
|
||||
/// Memory pool that operates on a given memory type.
|
||||
#[derive(Debug)]
|
||||
pub struct StandardHostVisibleMemoryTypePool {
|
||||
device: Arc<Device>,
|
||||
memory_type_index: u32,
|
||||
// TODO: obviously very inefficient
|
||||
occupied: Mutex<Vec<(Arc<MappedDeviceMemory>, Vec<Range<DeviceSize>>)>>,
|
||||
}
|
||||
|
||||
impl StandardHostVisibleMemoryTypePool {
|
||||
/// Creates a new pool that will operate on the given memory type.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// - Panics if `memory_type_index` is out of range.
|
||||
/// - Panics if `memory_type_index` refers to a memory type that is not host-visible.
|
||||
///
|
||||
#[inline]
|
||||
pub fn new(
|
||||
device: Arc<Device>,
|
||||
memory_type_index: u32,
|
||||
) -> Arc<StandardHostVisibleMemoryTypePool> {
|
||||
let memory_type =
|
||||
&device.physical_device().memory_properties().memory_types[memory_type_index as usize];
|
||||
assert!(memory_type.property_flags.host_visible);
|
||||
|
||||
Arc::new(StandardHostVisibleMemoryTypePool {
|
||||
device,
|
||||
memory_type_index,
|
||||
occupied: Mutex::new(Vec::new()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Allocates memory from the pool.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// - Panics if `size` is 0.
|
||||
/// - Panics if `alignment` is 0.
|
||||
///
|
||||
pub fn alloc(
|
||||
self: &Arc<Self>,
|
||||
size: DeviceSize,
|
||||
alignment: DeviceSize,
|
||||
) -> Result<StandardHostVisibleMemoryTypePoolAlloc, DeviceMemoryError> {
|
||||
assert!(size != 0);
|
||||
assert!(alignment != 0);
|
||||
|
||||
#[inline]
|
||||
fn align(val: DeviceSize, al: DeviceSize) -> DeviceSize {
|
||||
al * (1 + (val - 1) / al)
|
||||
}
|
||||
|
||||
// Find a location.
|
||||
let mut occupied = self.occupied.lock();
|
||||
|
||||
// Try finding an entry in already-allocated chunks.
|
||||
for &mut (ref dev_mem, ref mut entries) in occupied.iter_mut() {
|
||||
// Try find some free space in-between two entries.
|
||||
for i in 0..entries.len().saturating_sub(1) {
|
||||
let entry1 = entries[i].clone();
|
||||
let entry1_end = align(entry1.end, alignment);
|
||||
let entry2 = entries[i + 1].clone();
|
||||
if entry1_end + size <= entry2.start {
|
||||
entries.insert(i + 1, entry1_end..entry1_end + size);
|
||||
return Ok(StandardHostVisibleMemoryTypePoolAlloc {
|
||||
pool: self.clone(),
|
||||
memory: dev_mem.clone(),
|
||||
offset: entry1_end,
|
||||
size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Try append at the end.
|
||||
let last_end = entries.last().map(|e| align(e.end, alignment)).unwrap_or(0);
|
||||
if last_end + size <= (**dev_mem).as_ref().allocation_size() {
|
||||
entries.push(last_end..last_end + size);
|
||||
return Ok(StandardHostVisibleMemoryTypePoolAlloc {
|
||||
pool: self.clone(),
|
||||
memory: dev_mem.clone(),
|
||||
offset: last_end,
|
||||
size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// We need to allocate a new block.
|
||||
let new_block = {
|
||||
const MIN_BLOCK_SIZE: DeviceSize = 8 * 1024 * 1024; // 8 MB
|
||||
let allocation_size = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
|
||||
let memory = DeviceMemory::allocate(
|
||||
self.device.clone(),
|
||||
MemoryAllocateInfo {
|
||||
allocation_size,
|
||||
memory_type_index: self.memory_type_index,
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
let new_block = MappedDeviceMemory::new(memory, 0..allocation_size)?;
|
||||
Arc::new(new_block)
|
||||
};
|
||||
|
||||
occupied.push((new_block.clone(), vec![0..size]));
|
||||
Ok(StandardHostVisibleMemoryTypePoolAlloc {
|
||||
pool: self.clone(),
|
||||
memory: new_block,
|
||||
offset: 0,
|
||||
size,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the device this pool operates on.
|
||||
#[inline]
|
||||
pub fn device(&self) -> &Arc<Device> {
|
||||
&self.device
|
||||
}
|
||||
|
||||
/// Returns the index of the memory type this pool operates on.
|
||||
#[inline]
|
||||
pub fn memory_type_index(&self) -> u32 {
|
||||
self.memory_type_index
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StandardHostVisibleMemoryTypePoolAlloc {
|
||||
pool: Arc<StandardHostVisibleMemoryTypePool>,
|
||||
memory: Arc<MappedDeviceMemory>,
|
||||
offset: DeviceSize,
|
||||
size: DeviceSize,
|
||||
}
|
||||
|
||||
impl StandardHostVisibleMemoryTypePoolAlloc {
|
||||
#[inline]
|
||||
pub fn memory(&self) -> &MappedDeviceMemory {
|
||||
&self.memory
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn offset(&self) -> DeviceSize {
|
||||
self.offset
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn size(&self) -> DeviceSize {
|
||||
self.size
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for StandardHostVisibleMemoryTypePoolAlloc {
|
||||
fn drop(&mut self) {
|
||||
let mut occupied = self.pool.occupied.lock();
|
||||
|
||||
let entries = occupied
|
||||
.iter_mut()
|
||||
.find(|e| &*e.0 as *const MappedDeviceMemory == &*self.memory)
|
||||
.unwrap();
|
||||
|
||||
entries.1.retain(|e| e.start != self.offset);
|
||||
}
|
||||
}
|
@ -1,322 +0,0 @@
|
||||
// Copyright (c) 2016 The vulkano developers
|
||||
// Licensed under the Apache License, Version 2.0
|
||||
// <LICENSE-APACHE or
|
||||
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
|
||||
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
|
||||
// at your option. All files in the project carrying such
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
pub use self::{
|
||||
host_visible::{StandardHostVisibleMemoryTypePool, StandardHostVisibleMemoryTypePoolAlloc},
|
||||
non_host_visible::{
|
||||
StandardNonHostVisibleMemoryTypePool, StandardNonHostVisibleMemoryTypePoolAlloc,
|
||||
},
|
||||
pool::{StandardMemoryPool, StandardMemoryPoolAlloc},
|
||||
};
|
||||
use super::MemoryType;
|
||||
use crate::{
|
||||
device::{Device, DeviceOwned},
|
||||
memory::{
|
||||
device_memory::MemoryAllocateInfo, DedicatedAllocation, DeviceMemory, DeviceMemoryError,
|
||||
ExternalMemoryHandleTypes, MappedDeviceMemory, MemoryRequirements,
|
||||
},
|
||||
DeviceSize,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
mod host_visible;
|
||||
mod non_host_visible;
|
||||
mod pool;
|
||||
|
||||
// If the allocation size goes beyond this, then we perform a dedicated allocation which bypasses
|
||||
// the pool. This prevents the pool from overallocating a significant amount of memory.
|
||||
const MAX_POOL_ALLOC: DeviceSize = 256 * 1024 * 1024;
|
||||
|
||||
fn choose_allocation_memory_type<F>(
|
||||
device: &Arc<Device>,
|
||||
requirements: &MemoryRequirements,
|
||||
mut filter: F,
|
||||
map: MappingRequirement,
|
||||
) -> u32
|
||||
where
|
||||
F: FnMut(&MemoryType) -> AllocFromRequirementsFilter,
|
||||
{
|
||||
let mem_ty = {
|
||||
let mut filter = |ty: &MemoryType| {
|
||||
if map == MappingRequirement::Map && !ty.property_flags.host_visible {
|
||||
return AllocFromRequirementsFilter::Forbidden;
|
||||
}
|
||||
filter(ty)
|
||||
};
|
||||
let first_loop = device
|
||||
.physical_device()
|
||||
.memory_properties()
|
||||
.memory_types
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, t)| (i as u32, t, AllocFromRequirementsFilter::Preferred));
|
||||
let second_loop = device
|
||||
.physical_device()
|
||||
.memory_properties()
|
||||
.memory_types
|
||||
.iter()
|
||||
.enumerate()
|
||||
.map(|(i, t)| (i as u32, t, AllocFromRequirementsFilter::Allowed));
|
||||
first_loop
|
||||
.chain(second_loop)
|
||||
.filter(|(i, _, _)| (requirements.memory_type_bits & (1 << *i)) != 0)
|
||||
.find(|&(_, t, rq)| filter(t) == rq)
|
||||
.expect("Couldn't find a memory type to allocate from")
|
||||
.0
|
||||
};
|
||||
mem_ty
|
||||
}
|
||||
|
||||
/// Allocate dedicated memory with exportable fd.
|
||||
/// Memory pool memory always exports the same fd, thus dedicated is preferred.
|
||||
pub(crate) fn alloc_dedicated_with_exportable_fd<F>(
|
||||
device: Arc<Device>,
|
||||
requirements: &MemoryRequirements,
|
||||
_layout: AllocLayout,
|
||||
map: MappingRequirement,
|
||||
dedicated_allocation: DedicatedAllocation<'_>,
|
||||
filter: F,
|
||||
) -> Result<PotentialDedicatedAllocation<StandardMemoryPoolAlloc>, DeviceMemoryError>
|
||||
where
|
||||
F: FnMut(&MemoryType) -> AllocFromRequirementsFilter,
|
||||
{
|
||||
assert!(device.enabled_extensions().khr_external_memory_fd);
|
||||
assert!(device.enabled_extensions().khr_external_memory);
|
||||
|
||||
let memory_type_index = choose_allocation_memory_type(&device, requirements, filter, map);
|
||||
let memory = DeviceMemory::allocate(
|
||||
device,
|
||||
MemoryAllocateInfo {
|
||||
allocation_size: requirements.size,
|
||||
memory_type_index,
|
||||
export_handle_types: ExternalMemoryHandleTypes {
|
||||
opaque_fd: true,
|
||||
..ExternalMemoryHandleTypes::empty()
|
||||
},
|
||||
..MemoryAllocateInfo::dedicated_allocation(dedicated_allocation)
|
||||
},
|
||||
)?;
|
||||
|
||||
match map {
|
||||
MappingRequirement::Map => {
|
||||
let mapped_memory = MappedDeviceMemory::new(memory, 0..requirements.size)?;
|
||||
Ok(PotentialDedicatedAllocation::DedicatedMapped(mapped_memory))
|
||||
}
|
||||
MappingRequirement::DoNotMap => Ok(PotentialDedicatedAllocation::Dedicated(memory)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Pool of GPU-visible memory that can be allocated from.
|
||||
pub unsafe trait MemoryPool: DeviceOwned {
|
||||
/// Object that represents a single allocation. Its destructor should free the chunk.
|
||||
type Alloc: MemoryPoolAlloc;
|
||||
|
||||
/// Allocates memory from the pool.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Implementation safety:
|
||||
///
|
||||
/// - The returned object must match the requirements.
|
||||
/// - When a linear object is allocated next to an optimal object, it is mandatory that
|
||||
/// the boundary is aligned to the value of the `buffer_image_granularity` limit.
|
||||
///
|
||||
/// Note that it is not unsafe to *call* this function, but it is unsafe to bind the memory
|
||||
/// returned by this function to a resource.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// - Panics if `memory_type` doesn't belong to the same physical device as the device which
|
||||
/// was used to create this pool.
|
||||
/// - Panics if the memory type is not host-visible and `map` is `MappingRequirement::Map`.
|
||||
/// - Panics if `size` is 0.
|
||||
/// - Panics if `alignment` is 0.
|
||||
///
|
||||
fn alloc_generic(
|
||||
&self,
|
||||
memory_type_index: u32,
|
||||
size: DeviceSize,
|
||||
alignment: DeviceSize,
|
||||
layout: AllocLayout,
|
||||
map: MappingRequirement,
|
||||
) -> Result<Self::Alloc, DeviceMemoryError>;
|
||||
|
||||
/// Chooses a memory type and allocates memory from it.
|
||||
///
|
||||
/// Contrary to `alloc_generic`, this function may allocate a whole new block of memory
|
||||
/// dedicated to a resource based on `requirements.prefer_dedicated`.
|
||||
///
|
||||
/// `filter` can be used to restrict the memory types and to indicate which are preferred.
|
||||
/// If `map` is `MappingRequirement::Map`, then non-host-visible memory types will
|
||||
/// automatically be filtered out.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Implementation safety:
|
||||
///
|
||||
/// - The returned object must match the requirements.
|
||||
/// - When a linear object is allocated next to an optimal object, it is mandatory that
|
||||
/// the boundary is aligned to the value of the `buffer_image_granularity` limit.
|
||||
/// - If `dedicated` is not `None`, the returned memory must either not be dedicated or be
|
||||
/// dedicated to the resource that was passed.
|
||||
///
|
||||
/// Note that it is not unsafe to *call* this function, but it is unsafe to bind the memory
|
||||
/// returned by this function to a resource.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// - Panics if no memory type could be found, which can happen if `filter` is too restrictive.
|
||||
// TODO: ^ is this a good idea?
|
||||
/// - Panics if `size` is 0.
|
||||
/// - Panics if `alignment` is 0.
|
||||
///
|
||||
fn alloc_from_requirements<F>(
|
||||
&self,
|
||||
requirements: &MemoryRequirements,
|
||||
layout: AllocLayout,
|
||||
map: MappingRequirement,
|
||||
dedicated_allocation: Option<DedicatedAllocation<'_>>,
|
||||
filter: F,
|
||||
) -> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryError>
|
||||
where
|
||||
F: FnMut(&MemoryType) -> AllocFromRequirementsFilter,
|
||||
{
|
||||
// Choose a suitable memory type.
|
||||
let memory_type_index =
|
||||
choose_allocation_memory_type(self.device(), requirements, filter, map);
|
||||
|
||||
// Redirect to `self.alloc_generic` if we don't perform a dedicated allocation.
|
||||
if !requirements.prefer_dedicated && requirements.size <= MAX_POOL_ALLOC {
|
||||
let alloc = self.alloc_generic(
|
||||
memory_type_index,
|
||||
requirements.size,
|
||||
requirements.alignment,
|
||||
layout,
|
||||
map,
|
||||
)?;
|
||||
return Ok(alloc.into());
|
||||
}
|
||||
if dedicated_allocation.is_none() {
|
||||
let alloc = self.alloc_generic(
|
||||
memory_type_index,
|
||||
requirements.size,
|
||||
requirements.alignment,
|
||||
layout,
|
||||
map,
|
||||
)?;
|
||||
return Ok(alloc.into());
|
||||
}
|
||||
|
||||
// If we reach here, then we perform a dedicated alloc.
|
||||
let memory = DeviceMemory::allocate(
|
||||
self.device().clone(),
|
||||
MemoryAllocateInfo {
|
||||
allocation_size: requirements.size,
|
||||
memory_type_index,
|
||||
dedicated_allocation,
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
|
||||
match map {
|
||||
MappingRequirement::Map => {
|
||||
let mapped_memory = MappedDeviceMemory::new(memory, 0..requirements.size)?;
|
||||
Ok(PotentialDedicatedAllocation::DedicatedMapped(mapped_memory))
|
||||
}
|
||||
MappingRequirement::DoNotMap => Ok(PotentialDedicatedAllocation::Dedicated(memory)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum AllocFromRequirementsFilter {
|
||||
Preferred,
|
||||
Allowed,
|
||||
Forbidden,
|
||||
}
|
||||
|
||||
/// Object that represents a single allocation. Its destructor should free the chunk.
|
||||
pub unsafe trait MemoryPoolAlloc: Send + Sync {
|
||||
/// Returns the memory object from which this is allocated. Returns `None` if the memory is
|
||||
/// not mapped.
|
||||
fn mapped_memory(&self) -> Option<&MappedDeviceMemory>;
|
||||
|
||||
/// Returns the memory object from which this is allocated.
|
||||
fn memory(&self) -> &DeviceMemory;
|
||||
|
||||
/// Returns the offset at the start of the memory where the first byte of this allocation
|
||||
/// resides.
|
||||
fn offset(&self) -> DeviceSize;
|
||||
}
|
||||
|
||||
/// Whether an allocation should map the memory or not.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||
pub enum MappingRequirement {
|
||||
/// Should map.
|
||||
Map,
|
||||
/// Shouldn't map.
|
||||
DoNotMap,
|
||||
}
|
||||
|
||||
/// Layout of the object being allocated.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||
pub enum AllocLayout {
|
||||
/// The object has a linear layout.
|
||||
Linear,
|
||||
/// The object has an optimal layout.
|
||||
Optimal,
|
||||
}
|
||||
|
||||
/// Enumeration that can contain either a generic allocation coming from a pool, or a dedicated
|
||||
/// allocation for one specific resource.
|
||||
#[derive(Debug)]
|
||||
pub enum PotentialDedicatedAllocation<A> {
|
||||
Generic(A),
|
||||
Dedicated(DeviceMemory),
|
||||
DedicatedMapped(MappedDeviceMemory),
|
||||
}
|
||||
|
||||
unsafe impl<A> MemoryPoolAlloc for PotentialDedicatedAllocation<A>
|
||||
where
|
||||
A: MemoryPoolAlloc,
|
||||
{
|
||||
#[inline]
|
||||
fn mapped_memory(&self) -> Option<&MappedDeviceMemory> {
|
||||
match *self {
|
||||
PotentialDedicatedAllocation::Generic(ref alloc) => alloc.mapped_memory(),
|
||||
PotentialDedicatedAllocation::Dedicated(_) => None,
|
||||
PotentialDedicatedAllocation::DedicatedMapped(ref mem) => Some(mem),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn memory(&self) -> &DeviceMemory {
|
||||
match *self {
|
||||
PotentialDedicatedAllocation::Generic(ref alloc) => alloc.memory(),
|
||||
PotentialDedicatedAllocation::Dedicated(ref mem) => mem,
|
||||
PotentialDedicatedAllocation::DedicatedMapped(ref mem) => mem.as_ref(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn offset(&self) -> DeviceSize {
|
||||
match *self {
|
||||
PotentialDedicatedAllocation::Generic(ref alloc) => alloc.offset(),
|
||||
PotentialDedicatedAllocation::Dedicated(_) => 0,
|
||||
PotentialDedicatedAllocation::DedicatedMapped(_) => 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A> From<A> for PotentialDedicatedAllocation<A> {
|
||||
#[inline]
|
||||
fn from(alloc: A) -> PotentialDedicatedAllocation<A> {
|
||||
PotentialDedicatedAllocation::Generic(alloc)
|
||||
}
|
||||
}
|
@ -1,169 +0,0 @@
|
||||
// Copyright (c) 2016 The vulkano developers
|
||||
// Licensed under the Apache License, Version 2.0
|
||||
// <LICENSE-APACHE or
|
||||
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
|
||||
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
|
||||
// at your option. All files in the project carrying such
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use crate::{
|
||||
device::Device,
|
||||
memory::{device_memory::MemoryAllocateInfo, DeviceMemory, DeviceMemoryError},
|
||||
DeviceSize,
|
||||
};
|
||||
use parking_lot::Mutex;
|
||||
use std::{cmp, ops::Range, sync::Arc};
|
||||
|
||||
/// Memory pool that operates on a given memory type.
|
||||
#[derive(Debug)]
|
||||
pub struct StandardNonHostVisibleMemoryTypePool {
|
||||
device: Arc<Device>,
|
||||
memory_type_index: u32,
|
||||
// TODO: obviously very inefficient
|
||||
occupied: Mutex<Vec<(Arc<DeviceMemory>, Vec<Range<DeviceSize>>)>>,
|
||||
}
|
||||
|
||||
impl StandardNonHostVisibleMemoryTypePool {
|
||||
/// Creates a new pool that will operate on the given memory type.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// - Panics if `memory_type_index` is out of range.
|
||||
#[inline]
|
||||
pub fn new(
|
||||
device: Arc<Device>,
|
||||
memory_type_index: u32,
|
||||
) -> Arc<StandardNonHostVisibleMemoryTypePool> {
|
||||
let _ =
|
||||
&device.physical_device().memory_properties().memory_types[memory_type_index as usize];
|
||||
|
||||
Arc::new(StandardNonHostVisibleMemoryTypePool {
|
||||
device,
|
||||
memory_type_index,
|
||||
occupied: Mutex::new(Vec::new()),
|
||||
})
|
||||
}
|
||||
|
||||
/// Allocates memory from the pool.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// - Panics if `size` is 0.
|
||||
/// - Panics if `alignment` is 0.
|
||||
///
|
||||
pub fn alloc(
|
||||
self: &Arc<Self>,
|
||||
size: DeviceSize,
|
||||
alignment: DeviceSize,
|
||||
) -> Result<StandardNonHostVisibleMemoryTypePoolAlloc, DeviceMemoryError> {
|
||||
assert!(size != 0);
|
||||
assert!(alignment != 0);
|
||||
|
||||
#[inline]
|
||||
fn align(val: DeviceSize, al: DeviceSize) -> DeviceSize {
|
||||
al * (1 + (val - 1) / al)
|
||||
}
|
||||
|
||||
// Find a location.
|
||||
let mut occupied = self.occupied.lock();
|
||||
|
||||
// Try finding an entry in already-allocated chunks.
|
||||
for &mut (ref dev_mem, ref mut entries) in occupied.iter_mut() {
|
||||
// Try find some free space in-between two entries.
|
||||
for i in 0..entries.len().saturating_sub(1) {
|
||||
let entry1 = entries[i].clone();
|
||||
let entry1_end = align(entry1.end, alignment);
|
||||
let entry2 = entries[i + 1].clone();
|
||||
if entry1_end + size <= entry2.start {
|
||||
entries.insert(i + 1, entry1_end..entry1_end + size);
|
||||
return Ok(StandardNonHostVisibleMemoryTypePoolAlloc {
|
||||
pool: self.clone(),
|
||||
memory: dev_mem.clone(),
|
||||
offset: entry1_end,
|
||||
size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Try append at the end.
|
||||
let last_end = entries.last().map(|e| align(e.end, alignment)).unwrap_or(0);
|
||||
if last_end + size <= dev_mem.allocation_size() {
|
||||
entries.push(last_end..last_end + size);
|
||||
return Ok(StandardNonHostVisibleMemoryTypePoolAlloc {
|
||||
pool: self.clone(),
|
||||
memory: dev_mem.clone(),
|
||||
offset: last_end,
|
||||
size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// We need to allocate a new block.
|
||||
let new_block = {
|
||||
const MIN_BLOCK_SIZE: DeviceSize = 8 * 1024 * 1024; // 8 MB
|
||||
let allocation_size = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
|
||||
let new_block = DeviceMemory::allocate(
|
||||
self.device.clone(),
|
||||
MemoryAllocateInfo {
|
||||
allocation_size,
|
||||
memory_type_index: self.memory_type_index,
|
||||
..Default::default()
|
||||
},
|
||||
)?;
|
||||
Arc::new(new_block)
|
||||
};
|
||||
|
||||
occupied.push((new_block.clone(), vec![0..size]));
|
||||
Ok(StandardNonHostVisibleMemoryTypePoolAlloc {
|
||||
pool: self.clone(),
|
||||
memory: new_block,
|
||||
offset: 0,
|
||||
size,
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns the index of the memory type this pool operates on.
|
||||
#[inline]
|
||||
pub fn memory_type_index(&self) -> u32 {
|
||||
self.memory_type_index
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StandardNonHostVisibleMemoryTypePoolAlloc {
|
||||
pool: Arc<StandardNonHostVisibleMemoryTypePool>,
|
||||
memory: Arc<DeviceMemory>,
|
||||
offset: DeviceSize,
|
||||
size: DeviceSize,
|
||||
}
|
||||
|
||||
impl StandardNonHostVisibleMemoryTypePoolAlloc {
|
||||
#[inline]
|
||||
pub fn memory(&self) -> &DeviceMemory {
|
||||
&self.memory
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn offset(&self) -> DeviceSize {
|
||||
self.offset
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn size(&self) -> DeviceSize {
|
||||
self.size
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for StandardNonHostVisibleMemoryTypePoolAlloc {
|
||||
fn drop(&mut self) {
|
||||
let mut occupied = self.pool.occupied.lock();
|
||||
|
||||
let entries = occupied
|
||||
.iter_mut()
|
||||
.find(|e| &*e.0 as *const DeviceMemory == &*self.memory)
|
||||
.unwrap();
|
||||
|
||||
entries.1.retain(|e| e.start != self.offset);
|
||||
}
|
||||
}
|
@ -1,206 +0,0 @@
|
||||
// Copyright (c) 2016 The vulkano developers
|
||||
// Licensed under the Apache License, Version 2.0
|
||||
// <LICENSE-APACHE or
|
||||
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
|
||||
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
|
||||
// at your option. All files in the project carrying such
|
||||
// notice may not be copied, modified, or distributed except
|
||||
// according to those terms.
|
||||
|
||||
use crate::{
|
||||
device::{Device, DeviceOwned},
|
||||
memory::{
|
||||
pool::{
|
||||
AllocLayout, MappingRequirement, MemoryPool, MemoryPoolAlloc,
|
||||
StandardHostVisibleMemoryTypePool, StandardHostVisibleMemoryTypePoolAlloc,
|
||||
StandardNonHostVisibleMemoryTypePool, StandardNonHostVisibleMemoryTypePoolAlloc,
|
||||
},
|
||||
DeviceMemory, DeviceMemoryError, MappedDeviceMemory,
|
||||
},
|
||||
DeviceSize,
|
||||
};
|
||||
use parking_lot::Mutex;
|
||||
use std::{
|
||||
collections::{hash_map::Entry, HashMap},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StandardMemoryPool {
|
||||
device: Arc<Device>,
|
||||
|
||||
// For each memory type index, stores the associated pool.
|
||||
pools: Mutex<HashMap<(u32, AllocLayout, MappingRequirement), Pool>>,
|
||||
}
|
||||
|
||||
impl StandardMemoryPool {
|
||||
/// Creates a new pool.
|
||||
#[inline]
|
||||
pub fn new(device: Arc<Device>) -> Arc<StandardMemoryPool> {
|
||||
let cap = device
|
||||
.physical_device()
|
||||
.memory_properties()
|
||||
.memory_types
|
||||
.len();
|
||||
|
||||
Arc::new(StandardMemoryPool {
|
||||
device,
|
||||
pools: Mutex::new(HashMap::with_capacity(cap)),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn generic_allocation(
|
||||
mem_pool: Arc<StandardMemoryPool>,
|
||||
memory_type_index: u32,
|
||||
size: DeviceSize,
|
||||
alignment: DeviceSize,
|
||||
layout: AllocLayout,
|
||||
map: MappingRequirement,
|
||||
) -> Result<StandardMemoryPoolAlloc, DeviceMemoryError> {
|
||||
let mut pools = mem_pool.pools.lock();
|
||||
|
||||
let memory_properties = mem_pool.device().physical_device().memory_properties();
|
||||
let memory_type = memory_properties
|
||||
.memory_types
|
||||
.get(memory_type_index as usize)
|
||||
.ok_or(DeviceMemoryError::MemoryTypeIndexOutOfRange {
|
||||
memory_type_index,
|
||||
memory_type_count: memory_properties.memory_types.len() as u32,
|
||||
})?;
|
||||
|
||||
let memory_type_host_visible = memory_type.property_flags.host_visible;
|
||||
assert!(memory_type_host_visible || map == MappingRequirement::DoNotMap);
|
||||
|
||||
match pools.entry((memory_type_index, layout, map)) {
|
||||
Entry::Occupied(entry) => match *entry.get() {
|
||||
Pool::HostVisible(ref pool) => {
|
||||
let alloc = pool.alloc(size, alignment)?;
|
||||
let inner = StandardMemoryPoolAllocInner::HostVisible(alloc);
|
||||
Ok(StandardMemoryPoolAlloc {
|
||||
inner,
|
||||
_pool: mem_pool.clone(),
|
||||
})
|
||||
}
|
||||
Pool::NonHostVisible(ref pool) => {
|
||||
let alloc = pool.alloc(size, alignment)?;
|
||||
let inner = StandardMemoryPoolAllocInner::NonHostVisible(alloc);
|
||||
Ok(StandardMemoryPoolAlloc {
|
||||
inner,
|
||||
_pool: mem_pool.clone(),
|
||||
})
|
||||
}
|
||||
},
|
||||
|
||||
Entry::Vacant(entry) => {
|
||||
if memory_type_host_visible {
|
||||
let pool = StandardHostVisibleMemoryTypePool::new(
|
||||
mem_pool.device.clone(),
|
||||
memory_type_index,
|
||||
);
|
||||
entry.insert(Pool::HostVisible(pool.clone()));
|
||||
let alloc = pool.alloc(size, alignment)?;
|
||||
let inner = StandardMemoryPoolAllocInner::HostVisible(alloc);
|
||||
Ok(StandardMemoryPoolAlloc {
|
||||
inner,
|
||||
_pool: mem_pool.clone(),
|
||||
})
|
||||
} else {
|
||||
let pool = StandardNonHostVisibleMemoryTypePool::new(
|
||||
mem_pool.device.clone(),
|
||||
memory_type_index,
|
||||
);
|
||||
entry.insert(Pool::NonHostVisible(pool.clone()));
|
||||
let alloc = pool.alloc(size, alignment)?;
|
||||
let inner = StandardMemoryPoolAllocInner::NonHostVisible(alloc);
|
||||
Ok(StandardMemoryPoolAlloc {
|
||||
inner,
|
||||
_pool: mem_pool.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl MemoryPool for Arc<StandardMemoryPool> {
|
||||
type Alloc = StandardMemoryPoolAlloc;
|
||||
|
||||
fn alloc_generic(
|
||||
&self,
|
||||
memory_type_index: u32,
|
||||
size: DeviceSize,
|
||||
alignment: DeviceSize,
|
||||
layout: AllocLayout,
|
||||
map: MappingRequirement,
|
||||
) -> Result<StandardMemoryPoolAlloc, DeviceMemoryError> {
|
||||
generic_allocation(
|
||||
self.clone(),
|
||||
memory_type_index,
|
||||
size,
|
||||
alignment,
|
||||
layout,
|
||||
map,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl DeviceOwned for StandardMemoryPool {
|
||||
#[inline]
|
||||
fn device(&self) -> &Arc<Device> {
|
||||
&self.device
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Pool {
|
||||
HostVisible(Arc<StandardHostVisibleMemoryTypePool>),
|
||||
NonHostVisible(Arc<StandardNonHostVisibleMemoryTypePool>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct StandardMemoryPoolAlloc {
|
||||
inner: StandardMemoryPoolAllocInner,
|
||||
_pool: Arc<StandardMemoryPool>,
|
||||
}
|
||||
|
||||
impl StandardMemoryPoolAlloc {
|
||||
#[inline]
|
||||
pub fn size(&self) -> DeviceSize {
|
||||
match self.inner {
|
||||
StandardMemoryPoolAllocInner::NonHostVisible(ref mem) => mem.size(),
|
||||
StandardMemoryPoolAllocInner::HostVisible(ref mem) => mem.size(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl MemoryPoolAlloc for StandardMemoryPoolAlloc {
|
||||
#[inline]
|
||||
fn memory(&self) -> &DeviceMemory {
|
||||
match self.inner {
|
||||
StandardMemoryPoolAllocInner::NonHostVisible(ref mem) => mem.memory(),
|
||||
StandardMemoryPoolAllocInner::HostVisible(ref mem) => mem.memory().as_ref(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn mapped_memory(&self) -> Option<&MappedDeviceMemory> {
|
||||
match self.inner {
|
||||
StandardMemoryPoolAllocInner::NonHostVisible(_) => None,
|
||||
StandardMemoryPoolAllocInner::HostVisible(ref mem) => Some(mem.memory()),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn offset(&self) -> DeviceSize {
|
||||
match self.inner {
|
||||
StandardMemoryPoolAllocInner::NonHostVisible(ref mem) => mem.offset(),
|
||||
StandardMemoryPoolAllocInner::HostVisible(ref mem) => mem.offset(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum StandardMemoryPoolAllocInner {
|
||||
NonHostVisible(StandardNonHostVisibleMemoryTypePoolAlloc),
|
||||
HostVisible(StandardHostVisibleMemoryTypePoolAlloc),
|
||||
}
|
@ -409,6 +409,7 @@ mod tests {
|
||||
descriptor_set::{
|
||||
allocator::StandardDescriptorSetAllocator, PersistentDescriptorSet, WriteDescriptorSet,
|
||||
},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
|
||||
shader::{ShaderModule, SpecializationConstants, SpecializationMapEntry},
|
||||
sync::{now, GpuFuture},
|
||||
@ -491,8 +492,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
|
||||
let data_buffer = CpuAccessibleBuffer::from_data(
|
||||
device.clone(),
|
||||
&memory_allocator,
|
||||
BufferUsage {
|
||||
storage_buffer: true,
|
||||
..BufferUsage::empty()
|
||||
|
@ -720,6 +720,7 @@ mod tests {
|
||||
use crate::{
|
||||
format::Format,
|
||||
image::{attachment::AttachmentImage, view::ImageView},
|
||||
memory::allocator::StandardMemoryAllocator,
|
||||
render_pass::{Framebuffer, FramebufferCreateInfo, FramebufferCreationError, RenderPass},
|
||||
};
|
||||
|
||||
@ -743,8 +744,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let view = ImageView::new_default(
|
||||
AttachmentImage::new(device, [1024, 768], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [1024, 768], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let _ = Framebuffer::new(
|
||||
@ -810,8 +812,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let view = ImageView::new_default(
|
||||
AttachmentImage::new(device, [1024, 768], Format::R8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [1024, 768], Format::R8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -849,8 +852,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let view = ImageView::new_default(
|
||||
AttachmentImage::new(device, [600, 600], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [600, 600], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -886,8 +890,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let view = ImageView::new_default(
|
||||
AttachmentImage::new(device, [512, 700], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [512, 700], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -931,12 +936,13 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let a = ImageView::new_default(
|
||||
AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let b = ImageView::new_default(
|
||||
AttachmentImage::new(device, [512, 128], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [512, 128], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -981,8 +987,9 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let view = ImageView::new_default(
|
||||
AttachmentImage::new(device, [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
@ -1023,12 +1030,13 @@ mod tests {
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let memory_allocator = StandardMemoryAllocator::new_default(device);
|
||||
let a = ImageView::new_default(
|
||||
AttachmentImage::new(device.clone(), [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
let b = ImageView::new_default(
|
||||
AttachmentImage::new(device, [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
AttachmentImage::new(&memory_allocator, [256, 512], Format::R8G8B8A8_UNORM).unwrap(),
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
|
@ -26,6 +26,7 @@
|
||||
//! # let device: std::sync::Arc<vulkano::device::Device> = return;
|
||||
//! # let image_data: Vec<u8> = return;
|
||||
//! # let queue: std::sync::Arc<vulkano::device::Queue> = return;
|
||||
//! # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
|
||||
//! # let descriptor_set_allocator: vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator = return;
|
||||
//! # let mut command_buffer_builder: vulkano::command_buffer::AutoCommandBufferBuilder<vulkano::command_buffer::PrimaryAutoCommandBuffer> = return;
|
||||
//! use vulkano::descriptor_set::{PersistentDescriptorSet, WriteDescriptorSet};
|
||||
@ -67,6 +68,7 @@
|
||||
//! ).unwrap();
|
||||
//!
|
||||
//! let image = ImmutableImage::from_iter(
|
||||
//! &memory_allocator,
|
||||
//! image_data,
|
||||
//! ImageDimensions::Dim2d { width: 1920, height: 1080, array_layers: 1 },
|
||||
//! MipmapsCount::One,
|
||||
|