From 5578bf30da039c36a18a2166c127ea2a18eb0757 Mon Sep 17 00:00:00 2001 From: marc0246 <40955683+marc0246@users.noreply.github.com> Date: Sun, 3 Sep 2023 13:09:07 +0200 Subject: [PATCH] Memory (sub)allocation API 2.0 (#2316) * Excommunicate `PoolAllocator` * Switch to manual deallocation * Move `ResourceMemory` away from the `suballocator` module * Remove `SuballocatorError::BlockSizeExceeded` * Fix examples * Fix atom size * Address safety TODOs * Nice English you got there bro --- examples/src/bin/async-update.rs | 8 +- examples/src/bin/basic-compute-shader.rs | 5 +- .../deferred/frame/ambient_lighting_system.rs | 4 +- .../frame/directional_lighting_system.rs | 4 +- .../deferred/frame/point_lighting_system.rs | 4 +- examples/src/bin/deferred/frame/system.rs | 18 +- examples/src/bin/deferred/main.rs | 2 +- .../src/bin/deferred/triangle_draw_system.rs | 2 +- examples/src/bin/dynamic-buffers.rs | 8 +- examples/src/bin/dynamic-local-size.rs | 8 +- examples/src/bin/gl-interop.rs | 13 +- examples/src/bin/image-self-copy-blit/main.rs | 8 +- examples/src/bin/image/main.rs | 8 +- examples/src/bin/immutable-sampler/main.rs | 8 +- examples/src/bin/instancing.rs | 6 +- examples/src/bin/interactive_fractal/app.rs | 2 +- .../fractal_compute_pipeline.rs | 4 +- .../pixels_draw_pipeline.rs | 6 +- .../interactive_fractal/place_over_frame.rs | 4 +- examples/src/bin/msaa-renderpass.rs | 12 +- examples/src/bin/multi-window.rs | 4 +- .../multi_window_game_of_life/game_of_life.rs | 10 +- .../multi_window_game_of_life/pixels_draw.rs | 4 +- examples/src/bin/multiview.rs | 10 +- examples/src/bin/occlusion-query.rs | 10 +- examples/src/bin/push-constants.rs | 5 +- examples/src/bin/push-descriptors/main.rs | 8 +- examples/src/bin/runtime-shader/main.rs | 4 +- examples/src/bin/runtime_array/main.rs | 12 +- examples/src/bin/self-copy-buffer.rs | 5 +- examples/src/bin/shader-include/main.rs | 5 +- examples/src/bin/shader-types-sharing.rs | 4 +- examples/src/bin/simple-particles.rs | 6 +- examples/src/bin/specialization-constants.rs | 5 +- examples/src/bin/teapot/main.rs | 16 +- examples/src/bin/tessellation.rs | 4 +- examples/src/bin/texture_array/main.rs | 8 +- examples/src/bin/triangle-v1_3.rs | 4 +- examples/src/bin/triangle.rs | 4 +- vulkano-util/src/renderer.rs | 2 +- vulkano/src/buffer/allocator.rs | 12 +- vulkano/src/buffer/mod.rs | 29 +- vulkano/src/buffer/subbuffer.rs | 21 +- vulkano/src/buffer/sys.rs | 17 +- vulkano/src/buffer/view.rs | 25 +- vulkano/src/command_buffer/auto/mod.rs | 23 +- vulkano/src/image/mod.rs | 13 +- vulkano/src/image/sampler/ycbcr.rs | 4 +- vulkano/src/image/sys.rs | 14 +- vulkano/src/memory/allocator/mod.rs | 495 ++-- vulkano/src/memory/allocator/suballocator.rs | 2308 ++++------------- vulkano/src/memory/device_memory.rs | 12 +- vulkano/src/memory/mod.rs | 339 ++- vulkano/src/pipeline/compute.rs | 9 +- vulkano/src/render_pass/framebuffer.rs | 33 +- 55 files changed, 1418 insertions(+), 2200 deletions(-) diff --git a/examples/src/bin/async-update.rs b/examples/src/bin/async-update.rs index e0fb6b0e..dc918298 100644 --- a/examples/src/bin/async-update.rs +++ b/examples/src/bin/async-update.rs @@ -274,7 +274,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -293,7 +293,7 @@ fn main() { let uniform_buffers = (0..swapchain.image_count()) .map(|_| { Buffer::new_sized( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: 
BufferUsage::UNIFORM_BUFFER, ..Default::default() @@ -312,7 +312,7 @@ fn main() { // is used exclusively for writing, swapping the two after each update. let textures = [(); 2].map(|_| { Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -714,7 +714,7 @@ fn run_worker( // out-of-date texture is the current up-to-date texture and vice-versa, cycle repeating. let staging_buffers = [(); 2].map(|_| { Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() diff --git a/examples/src/bin/basic-compute-shader.rs b/examples/src/bin/basic-compute-shader.rs index eb790139..58d6a060 100644 --- a/examples/src/bin/basic-compute-shader.rs +++ b/examples/src/bin/basic-compute-shader.rs @@ -13,6 +13,7 @@ // been more or more used for general-purpose operations as well. This is called "General-Purpose // GPU", or *GPGPU*. This is what this example demonstrates. +use std::sync::Arc; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -158,14 +159,14 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); // We start by creating the buffer that will store the data. let data_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/deferred/frame/ambient_lighting_system.rs b/examples/src/bin/deferred/frame/ambient_lighting_system.rs index babd8166..53536cfd 100644 --- a/examples/src/bin/deferred/frame/ambient_lighting_system.rs +++ b/examples/src/bin/deferred/frame/ambient_lighting_system.rs @@ -19,7 +19,7 @@ use vulkano::{ }, device::Queue, image::view::ImageView, - memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter}, + memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState}, @@ -54,7 +54,7 @@ impl AmbientLightingSystem { pub fn new( gfx_queue: Arc, subpass: Subpass, - memory_allocator: &impl MemoryAllocator, + memory_allocator: Arc, command_buffer_allocator: Arc, descriptor_set_allocator: Arc, ) -> AmbientLightingSystem { diff --git a/examples/src/bin/deferred/frame/directional_lighting_system.rs b/examples/src/bin/deferred/frame/directional_lighting_system.rs index 0d8c8853..802fd559 100644 --- a/examples/src/bin/deferred/frame/directional_lighting_system.rs +++ b/examples/src/bin/deferred/frame/directional_lighting_system.rs @@ -20,7 +20,7 @@ use vulkano::{ }, device::Queue, image::view::ImageView, - memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter}, + memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState}, @@ -55,7 +55,7 @@ impl DirectionalLightingSystem { pub fn new( gfx_queue: Arc, subpass: Subpass, - memory_allocator: &impl MemoryAllocator, + memory_allocator: Arc, command_buffer_allocator: Arc, descriptor_set_allocator: Arc, ) -> DirectionalLightingSystem { diff 
--git a/examples/src/bin/deferred/frame/point_lighting_system.rs b/examples/src/bin/deferred/frame/point_lighting_system.rs index 2d2ac08e..3e9b0b27 100644 --- a/examples/src/bin/deferred/frame/point_lighting_system.rs +++ b/examples/src/bin/deferred/frame/point_lighting_system.rs @@ -20,7 +20,7 @@ use vulkano::{ }, device::Queue, image::view::ImageView, - memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter}, + memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState}, @@ -54,7 +54,7 @@ impl PointLightingSystem { pub fn new( gfx_queue: Arc, subpass: Subpass, - memory_allocator: &impl MemoryAllocator, + memory_allocator: Arc, command_buffer_allocator: Arc, descriptor_set_allocator: Arc, ) -> PointLightingSystem { diff --git a/examples/src/bin/deferred/frame/system.rs b/examples/src/bin/deferred/frame/system.rs index 0337e3a7..3d58ea77 100644 --- a/examples/src/bin/deferred/frame/system.rs +++ b/examples/src/bin/deferred/frame/system.rs @@ -155,7 +155,7 @@ impl FrameSystem { // will be replaced the first time we call `frame()`. let diffuse_buffer = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::A2B10G10R10_UNORM_PACK32, @@ -172,7 +172,7 @@ impl FrameSystem { .unwrap(); let normals_buffer = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R16G16B16A16_SFLOAT, @@ -187,7 +187,7 @@ impl FrameSystem { .unwrap(); let depth_buffer = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::D16_UNORM, @@ -211,21 +211,21 @@ impl FrameSystem { let ambient_lighting_system = AmbientLightingSystem::new( gfx_queue.clone(), lighting_subpass.clone(), - &memory_allocator, + memory_allocator.clone(), command_buffer_allocator.clone(), descriptor_set_allocator.clone(), ); let directional_lighting_system = DirectionalLightingSystem::new( gfx_queue.clone(), lighting_subpass.clone(), - &memory_allocator, + memory_allocator.clone(), command_buffer_allocator.clone(), descriptor_set_allocator.clone(), ); let point_lighting_system = PointLightingSystem::new( gfx_queue.clone(), lighting_subpass, - &memory_allocator, + memory_allocator.clone(), command_buffer_allocator.clone(), descriptor_set_allocator, ); @@ -281,7 +281,7 @@ impl FrameSystem { // render pass their content becomes undefined. 
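[Editor's sketch, not part of the patch] The hunks above all follow the same migration: the allocator is created once, wrapped in an `Arc`, and handed out as cheap clones instead of `&allocator` references. A minimal sketch of that convention, assuming a `device: Arc<Device>` is already in scope (the helper name `make_allocator` is hypothetical):

use std::sync::Arc;

use vulkano::{device::Device, memory::allocator::StandardMemoryAllocator};

// One allocator is created up front and shared by reference counting;
// `Arc::clone` is a refcount bump, not a second allocator.
fn make_allocator(device: Arc<Device>) -> Arc<StandardMemoryAllocator> {
    Arc::new(StandardMemoryAllocator::new_default(device))
}

// Usage: every `Buffer::*`/`Image::new` call now takes the allocator by
// value, so callers clone the `Arc` when they need to keep their own handle:
// let memory_allocator = make_allocator(device.clone());
// let vertex_buffer = Buffer::from_iter(memory_allocator.clone(), /* ... */);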
self.diffuse_buffer = ImageView::new_default( Image::new( - &self.memory_allocator, + self.memory_allocator.clone(), ImageCreateInfo { extent, format: Format::A2B10G10R10_UNORM_PACK32, @@ -297,7 +297,7 @@ impl FrameSystem { .unwrap(); self.normals_buffer = ImageView::new_default( Image::new( - &self.memory_allocator, + self.memory_allocator.clone(), ImageCreateInfo { extent, format: Format::R16G16B16A16_SFLOAT, @@ -313,7 +313,7 @@ impl FrameSystem { .unwrap(); self.depth_buffer = ImageView::new_default( Image::new( - &self.memory_allocator, + self.memory_allocator.clone(), ImageCreateInfo { extent, format: Format::D16_UNORM, diff --git a/examples/src/bin/deferred/main.rs b/examples/src/bin/deferred/main.rs index 22e8a6c6..11f8b356 100644 --- a/examples/src/bin/deferred/main.rs +++ b/examples/src/bin/deferred/main.rs @@ -174,7 +174,7 @@ fn main() { let triangle_draw_system = TriangleDrawSystem::new( queue.clone(), frame_system.deferred_subpass(), - &memory_allocator, + memory_allocator.clone(), command_buffer_allocator, ); diff --git a/examples/src/bin/deferred/triangle_draw_system.rs b/examples/src/bin/deferred/triangle_draw_system.rs index 5966277a..9a47416d 100644 --- a/examples/src/bin/deferred/triangle_draw_system.rs +++ b/examples/src/bin/deferred/triangle_draw_system.rs @@ -46,7 +46,7 @@ impl TriangleDrawSystem { pub fn new( gfx_queue: Arc, subpass: Subpass, - memory_allocator: &StandardMemoryAllocator, + memory_allocator: Arc, command_buffer_allocator: Arc, ) -> TriangleDrawSystem { let vertices = [ diff --git a/examples/src/bin/dynamic-buffers.rs b/examples/src/bin/dynamic-buffers.rs index 9c238ad5..2fb3da4b 100644 --- a/examples/src/bin/dynamic-buffers.rs +++ b/examples/src/bin/dynamic-buffers.rs @@ -13,7 +13,7 @@ // Each draw or dispatch call can specify an offset into the buffer to read object data from, // without having to rebind descriptor sets. -use std::{iter::repeat, mem::size_of}; +use std::{iter::repeat, mem::size_of, sync::Arc}; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -153,7 +153,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); @@ -188,7 +188,7 @@ fn main() { }; let input_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::UNIFORM_BUFFER, ..Default::default() @@ -203,7 +203,7 @@ fn main() { .unwrap(); let output_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/dynamic-local-size.rs b/examples/src/bin/dynamic-local-size.rs index bf267535..cc6df296 100644 --- a/examples/src/bin/dynamic-local-size.rs +++ b/examples/src/bin/dynamic-local-size.rs @@ -13,7 +13,7 @@ // Workgroup parallelism capabilities vary between GPUs and setting them properly is important to // achieve the maximal performance that particular device can provide. 
-use std::{fs::File, io::BufWriter, path::Path}; +use std::{fs::File, io::BufWriter, path::Path, sync::Arc}; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -209,13 +209,13 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); let image = Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -238,7 +238,7 @@ fn main() { .unwrap(); let buf = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_DST, ..Default::default() diff --git a/examples/src/bin/gl-interop.rs b/examples/src/bin/gl-interop.rs index a58a54fd..8bc1faae 100644 --- a/examples/src/bin/gl-interop.rs +++ b/examples/src/bin/gl-interop.rs @@ -41,11 +41,10 @@ mod linux { }, memory::{ allocator::{ - AllocationCreateInfo, MemoryAlloc, MemoryAllocator, MemoryTypeFilter, - StandardMemoryAllocator, + AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter, StandardMemoryAllocator, }, DedicatedAllocation, DeviceMemory, ExternalMemoryHandleType, ExternalMemoryHandleTypes, - MemoryAllocateInfo, + MemoryAllocateInfo, ResourceMemory, }, pipeline::{ graphics::{ @@ -159,7 +158,7 @@ mod linux { let image = Arc::new( raw_image - .bind_memory([MemoryAlloc::new(image_memory)]) + .bind_memory([ResourceMemory::new_dedicated(image_memory)]) .map_err(|(err, _, _)| err) .unwrap(), ); @@ -464,7 +463,7 @@ mod linux { Vec>, Arc, Arc, - StandardMemoryAllocator, + Arc, Subbuffer<[MyVertex]>, ) { let library = VulkanLibrary::new().unwrap(); @@ -600,7 +599,7 @@ mod linux { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let vertices = [ MyVertex { @@ -617,7 +616,7 @@ mod linux { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/image-self-copy-blit/main.rs b/examples/src/bin/image-self-copy-blit/main.rs index 58a7c9b6..a33c31b3 100644 --- a/examples/src/bin/image-self-copy-blit/main.rs +++ b/examples/src/bin/image-self-copy-blit/main.rs @@ -156,7 +156,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -180,7 +180,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -229,7 +229,7 @@ fn main() { let extent = [info.width * 2, info.height * 2, 1]; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -248,7 +248,7 @@ fn main() { .unwrap(); let image = Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { format: Format::R8G8B8A8_UNORM, extent, diff --git a/examples/src/bin/image/main.rs b/examples/src/bin/image/main.rs index 
0a82f0dc..485e3977 100644 --- a/examples/src/bin/image/main.rs +++ b/examples/src/bin/image/main.rs @@ -155,7 +155,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -179,7 +179,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -228,7 +228,7 @@ fn main() { let extent = [info.width, info.height, 1]; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -247,7 +247,7 @@ fn main() { .unwrap(); let image = Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_SRGB, diff --git a/examples/src/bin/immutable-sampler/main.rs b/examples/src/bin/immutable-sampler/main.rs index 68b8862b..3153ae08 100644 --- a/examples/src/bin/immutable-sampler/main.rs +++ b/examples/src/bin/immutable-sampler/main.rs @@ -161,7 +161,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -185,7 +185,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -234,7 +234,7 @@ fn main() { let extent = [info.width, info.height, 1]; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -253,7 +253,7 @@ fn main() { .unwrap(); let image = Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_SRGB, diff --git a/examples/src/bin/instancing.rs b/examples/src/bin/instancing.rs index 574fc0e2..0e48d03f 100644 --- a/examples/src/bin/instancing.rs +++ b/examples/src/bin/instancing.rs @@ -168,7 +168,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); // We now create a buffer that will store the shape of our triangle. This triangle is identical // to the one in the `triangle.rs` example. 
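[Editor's sketch, not part of the patch] For reference, a self-contained example of `Buffer::from_iter` under the new signature used throughout these hunks; the helper name `upload_data` and the `0..64u32` test data are illustrative only:

use std::sync::Arc;

use vulkano::{
    buffer::{Buffer, BufferCreateInfo, BufferUsage, Subbuffer},
    memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
};

// The allocator is moved into the call; clone the `Arc` first if it is still
// needed afterwards, exactly as the updated examples do.
fn upload_data(memory_allocator: Arc<StandardMemoryAllocator>) -> Subbuffer<[u32]> {
    Buffer::from_iter(
        memory_allocator,
        BufferCreateInfo {
            usage: BufferUsage::STORAGE_BUFFER,
            ..Default::default()
        },
        AllocationCreateInfo {
            memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
            ..Default::default()
        },
        0..64u32,
    )
    .unwrap()
}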
@@ -184,7 +184,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -222,7 +222,7 @@ fn main() { data }; let instance_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/interactive_fractal/app.rs b/examples/src/bin/interactive_fractal/app.rs index 91801886..82f6d38a 100644 --- a/examples/src/bin/interactive_fractal/app.rs +++ b/examples/src/bin/interactive_fractal/app.rs @@ -81,7 +81,7 @@ impl FractalApp { ), place_over_frame: RenderPassPlaceOverFrame::new( gfx_queue, - &memory_allocator, + memory_allocator.clone(), command_buffer_allocator, descriptor_set_allocator, image_format, diff --git a/examples/src/bin/interactive_fractal/fractal_compute_pipeline.rs b/examples/src/bin/interactive_fractal/fractal_compute_pipeline.rs index b76d3a0e..bc963efc 100644 --- a/examples/src/bin/interactive_fractal/fractal_compute_pipeline.rs +++ b/examples/src/bin/interactive_fractal/fractal_compute_pipeline.rs @@ -59,7 +59,7 @@ impl FractalComputePipeline { ]; let palette_size = colors.len() as i32; let palette = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() @@ -119,7 +119,7 @@ impl FractalComputePipeline { colors.push([r, g, b, a]); } self.palette = Buffer::from_iter( - &self.memory_allocator, + self.memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/interactive_fractal/pixels_draw_pipeline.rs b/examples/src/bin/interactive_fractal/pixels_draw_pipeline.rs index 06e8048c..9a24aea5 100644 --- a/examples/src/bin/interactive_fractal/pixels_draw_pipeline.rs +++ b/examples/src/bin/interactive_fractal/pixels_draw_pipeline.rs @@ -22,7 +22,7 @@ use vulkano::{ sampler::{Filter, Sampler, SamplerAddressMode, SamplerCreateInfo, SamplerMipmapMode}, view::ImageView, }, - memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter}, + memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ graphics::{ color_blend::ColorBlendState, @@ -89,13 +89,13 @@ impl PixelsDrawPipeline { pub fn new( gfx_queue: Arc, subpass: Subpass, - memory_allocator: &impl MemoryAllocator, + memory_allocator: Arc, command_buffer_allocator: Arc, descriptor_set_allocator: Arc, ) -> PixelsDrawPipeline { let (vertices, indices) = textured_quad(2.0, 2.0); let vertex_buffer = Buffer::from_iter( - memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/interactive_fractal/place_over_frame.rs b/examples/src/bin/interactive_fractal/place_over_frame.rs index f348cb19..d232820a 100644 --- a/examples/src/bin/interactive_fractal/place_over_frame.rs +++ b/examples/src/bin/interactive_fractal/place_over_frame.rs @@ -18,7 +18,7 @@ use vulkano::{ device::Queue, format::Format, image::view::ImageView, - memory::allocator::MemoryAllocator, + memory::allocator::StandardMemoryAllocator, render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass}, sync::GpuFuture, }; @@ -34,7 +34,7 @@ pub struct RenderPassPlaceOverFrame { impl RenderPassPlaceOverFrame { pub fn new( gfx_queue: Arc, - memory_allocator: &impl MemoryAllocator, + memory_allocator: Arc, command_buffer_allocator: Arc, 
descriptor_set_allocator: Arc, output_format: Format, diff --git a/examples/src/bin/msaa-renderpass.rs b/examples/src/bin/msaa-renderpass.rs index 990af118..9515b0bc 100644 --- a/examples/src/bin/msaa-renderpass.rs +++ b/examples/src/bin/msaa-renderpass.rs @@ -62,7 +62,7 @@ // non-multisampled image. This operation is not a regular blit (blitting a multisampled image is // an error), instead it is called *resolving* the image. -use std::{fs::File, io::BufWriter, path::Path}; +use std::{fs::File, io::BufWriter, path::Path, sync::Arc}; use vulkano::{ buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -150,7 +150,7 @@ fn main() { .unwrap(); let queue = queues.next().unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); // Creating our intermediate multisampled image. // @@ -158,7 +158,7 @@ fn main() { // image. But we also pass the number of samples-per-pixel, which is 4 here. let intermediary = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -175,7 +175,7 @@ fn main() { // This is the final image that will receive the anti-aliased triangle. let image = Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -299,7 +299,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -368,7 +368,7 @@ fn main() { let command_buffer_allocator = StandardCommandBufferAllocator::new(device, Default::default()); let buf = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_DST, ..Default::default() diff --git a/examples/src/bin/multi-window.rs b/examples/src/bin/multi-window.rs index dded6ca8..9506f581 100644 --- a/examples/src/bin/multi-window.rs +++ b/examples/src/bin/multi-window.rs @@ -177,7 +177,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -198,7 +198,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/multi_window_game_of_life/game_of_life.rs b/examples/src/bin/multi_window_game_of_life/game_of_life.rs index b92042dd..7092f904 100644 --- a/examples/src/bin/multi_window_game_of_life/game_of_life.rs +++ b/examples/src/bin/multi_window_game_of_life/game_of_life.rs @@ -23,7 +23,7 @@ use vulkano::{ device::Queue, format::Format, image::{view::ImageView, Image, ImageCreateInfo, ImageType, ImageUsage}, - memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter}, + memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator}, pipeline::{ compute::ComputePipelineCreateInfo, layout::PipelineDescriptorSetLayoutCreateInfo, ComputePipeline, Pipeline, PipelineBindPoint, PipelineLayout, @@ -46,7 +46,7 @@ pub struct GameOfLifeComputePipeline { image: Arc, } -fn rand_grid(memory_allocator: &impl MemoryAllocator, size: [u32; 2]) -> Subbuffer<[u32]> { +fn rand_grid(memory_allocator: 
Arc, size: [u32; 2]) -> Subbuffer<[u32]> { Buffer::from_iter( memory_allocator, BufferCreateInfo { @@ -66,8 +66,8 @@ fn rand_grid(memory_allocator: &impl MemoryAllocator, size: [u32; 2]) -> Subbuff impl GameOfLifeComputePipeline { pub fn new(app: &App, compute_queue: Arc, size: [u32; 2]) -> GameOfLifeComputePipeline { let memory_allocator = app.context.memory_allocator(); - let life_in = rand_grid(memory_allocator, size); - let life_out = rand_grid(memory_allocator, size); + let life_in = rand_grid(memory_allocator.clone(), size); + let life_out = rand_grid(memory_allocator.clone(), size); let compute_life_pipeline = { let device = compute_queue.device(); @@ -94,7 +94,7 @@ impl GameOfLifeComputePipeline { let image = ImageView::new_default( Image::new( - memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, diff --git a/examples/src/bin/multi_window_game_of_life/pixels_draw.rs b/examples/src/bin/multi_window_game_of_life/pixels_draw.rs index b8ceba66..85d4e8fb 100644 --- a/examples/src/bin/multi_window_game_of_life/pixels_draw.rs +++ b/examples/src/bin/multi_window_game_of_life/pixels_draw.rs @@ -91,7 +91,7 @@ impl PixelsDrawPipeline { let (vertices, indices) = textured_quad(2.0, 2.0); let memory_allocator = app.context.memory_allocator(); let vertex_buffer = Buffer::from_iter( - memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -105,7 +105,7 @@ impl PixelsDrawPipeline { ) .unwrap(); let index_buffer = Buffer::from_iter( - memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::INDEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/multiview.rs b/examples/src/bin/multiview.rs index 6b53fe37..19e61e9f 100644 --- a/examples/src/bin/multiview.rs +++ b/examples/src/bin/multiview.rs @@ -12,7 +12,7 @@ // multiple perspectives or cameras are very similar like in virtual reality or other types of // stereoscopic rendering where the left and right eye only differ in a small position offset. 
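[Editor's sketch, not part of the patch] `Image::new` follows the same convention as the buffer constructors; a minimal sketch with a hypothetical helper, error handling collapsed to `unwrap`:

use std::sync::Arc;

use vulkano::{
    format::Format,
    image::{Image, ImageCreateInfo, ImageType, ImageUsage},
    memory::allocator::{AllocationCreateInfo, StandardMemoryAllocator},
};

// `AllocationCreateInfo::default()` prefers device-local memory, which is
// what a render target wants.
fn make_render_target(
    memory_allocator: Arc<StandardMemoryAllocator>,
    extent: [u32; 3],
) -> Arc<Image> {
    Image::new(
        memory_allocator,
        ImageCreateInfo {
            image_type: ImageType::Dim2d,
            format: Format::R8G8B8A8_UNORM,
            extent,
            usage: ImageUsage::COLOR_ATTACHMENT | ImageUsage::TRANSFER_SRC,
            ..Default::default()
        },
        AllocationCreateInfo::default(),
    )
    .unwrap()
}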
-use std::{fs::File, io::BufWriter, path::Path}; +use std::{fs::File, io::BufWriter, path::Path, sync::Arc}; use vulkano::{ buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer}, command_buffer::{ @@ -134,10 +134,10 @@ fn main() { let queue = queues.next().unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let image = Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::B8G8R8A8_SRGB, @@ -171,7 +171,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -313,7 +313,7 @@ fn main() { let create_buffer = || { Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_DST, ..Default::default() diff --git a/examples/src/bin/occlusion-query.rs b/examples/src/bin/occlusion-query.rs index 5591eebd..8d85df83 100644 --- a/examples/src/bin/occlusion-query.rs +++ b/examples/src/bin/occlusion-query.rs @@ -150,7 +150,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -206,7 +206,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -360,7 +360,7 @@ fn main() { &images, render_pass.clone(), &mut viewport, - &memory_allocator, + memory_allocator.clone(), ); let mut recreate_swapchain = false; @@ -401,7 +401,7 @@ fn main() { &new_images, render_pass.clone(), &mut viewport, - &memory_allocator, + memory_allocator.clone(), ); recreate_swapchain = false; } @@ -565,7 +565,7 @@ fn window_size_dependent_setup( images: &[Arc], render_pass: Arc, viewport: &mut Viewport, - memory_allocator: &StandardMemoryAllocator, + memory_allocator: Arc, ) -> Vec> { let extent = images[0].extent(); viewport.extent = [extent[0] as f32, extent[1] as f32]; diff --git a/examples/src/bin/push-constants.rs b/examples/src/bin/push-constants.rs index cd91fe19..672b7666 100644 --- a/examples/src/bin/push-constants.rs +++ b/examples/src/bin/push-constants.rs @@ -12,6 +12,7 @@ // modifying and binding descriptor sets for each update. As a result, they are expected to // outperform such memory-backed resource updates. 
+use std::sync::Arc; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -140,13 +141,13 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); let data_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/push-descriptors/main.rs b/examples/src/bin/push-descriptors/main.rs index a886e702..b0d4f386 100644 --- a/examples/src/bin/push-descriptors/main.rs +++ b/examples/src/bin/push-descriptors/main.rs @@ -151,7 +151,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -175,7 +175,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -223,7 +223,7 @@ fn main() { let extent = [info.width, info.height, 1]; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -242,7 +242,7 @@ fn main() { .unwrap(); let image = Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_SRGB, diff --git a/examples/src/bin/runtime-shader/main.rs b/examples/src/bin/runtime-shader/main.rs index f04fca88..7333e5d0 100644 --- a/examples/src/bin/runtime-shader/main.rs +++ b/examples/src/bin/runtime-shader/main.rs @@ -236,7 +236,7 @@ fn main() { let mut recreate_swapchain = false; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -262,7 +262,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/runtime_array/main.rs b/examples/src/bin/runtime_array/main.rs index dee566ca..37c26aad 100644 --- a/examples/src/bin/runtime_array/main.rs +++ b/examples/src/bin/runtime_array/main.rs @@ -163,7 +163,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -239,7 +239,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -289,7 +289,7 @@ fn main() { let extent = [info.width, info.height, 1]; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -308,7 +308,7 @@ fn main() { .unwrap(); let image = Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: 
Format::R8G8B8A8_SRGB, @@ -338,7 +338,7 @@ fn main() { let extent = [info.width, info.height, 1]; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -357,7 +357,7 @@ fn main() { .unwrap(); let image = Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_SRGB, diff --git a/examples/src/bin/self-copy-buffer.rs b/examples/src/bin/self-copy-buffer.rs index d0b26820..1f2dad12 100644 --- a/examples/src/bin/self-copy-buffer.rs +++ b/examples/src/bin/self-copy-buffer.rs @@ -11,6 +11,7 @@ // and then we use `copy_buffer_dimensions` to copy the first half of the input buffer to the // second half. +use std::sync::Arc; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -132,13 +133,13 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); let data_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_SRC diff --git a/examples/src/bin/shader-include/main.rs b/examples/src/bin/shader-include/main.rs index 9d952c84..be356115 100644 --- a/examples/src/bin/shader-include/main.rs +++ b/examples/src/bin/shader-include/main.rs @@ -11,6 +11,7 @@ // source code. The boilerplate is taken from the "basic-compute-shader.rs" example, where most of // the boilerplate is explained. +use std::sync::Arc; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -140,13 +141,13 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); let data_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/shader-types-sharing.rs b/examples/src/bin/shader-types-sharing.rs index 80f923ab..c7db2c82 100644 --- a/examples/src/bin/shader-types-sharing.rs +++ b/examples/src/bin/shader-types-sharing.rs @@ -232,14 +232,14 @@ fn main() { future.wait(None).unwrap(); } - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); // Prepare test array `[0, 1, 2, 3....]`. 
let data_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/simple-particles.rs b/examples/src/bin/simple-particles.rs index 482d815d..810d3b50 100644 --- a/examples/src/bin/simple-particles.rs +++ b/examples/src/bin/simple-particles.rs @@ -326,7 +326,7 @@ fn main() { } } - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); @@ -353,7 +353,7 @@ fn main() { // Create a CPU-accessible buffer initialized with the vertex data. let temporary_accessible_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { // Specify this buffer will be used as a transfer source. usage: BufferUsage::TRANSFER_SRC, @@ -372,7 +372,7 @@ fn main() { // Create a buffer in device-local memory with enough space for `PARTICLE_COUNT` number of // `Vertex`. let device_local_buffer = Buffer::new_slice::( - &memory_allocator, + memory_allocator, BufferCreateInfo { // Specify use as a storage buffer, vertex buffer, and transfer destination. usage: BufferUsage::STORAGE_BUFFER diff --git a/examples/src/bin/specialization-constants.rs b/examples/src/bin/specialization-constants.rs index 3a6b55cd..fbbd3d2f 100644 --- a/examples/src/bin/specialization-constants.rs +++ b/examples/src/bin/specialization-constants.rs @@ -9,6 +9,7 @@ // TODO: Give a paragraph about what specialization are and what problems they solve. +use std::sync::Arc; use vulkano::{ buffer::{Buffer, BufferCreateInfo, BufferUsage}, command_buffer::{ @@ -140,13 +141,13 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone()); let command_buffer_allocator = StandardCommandBufferAllocator::new(device.clone(), Default::default()); let data_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/examples/src/bin/teapot/main.rs b/examples/src/bin/teapot/main.rs index 080c8164..8223d412 100644 --- a/examples/src/bin/teapot/main.rs +++ b/examples/src/bin/teapot/main.rs @@ -162,7 +162,7 @@ fn main() { let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -176,7 +176,7 @@ fn main() { ) .unwrap(); let normals_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -190,7 +190,7 @@ fn main() { ) .unwrap(); let index_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::INDEX_BUFFER, ..Default::default() @@ -247,7 +247,7 @@ fn main() { .unwrap(); let (mut pipeline, mut framebuffers) = window_size_dependent_setup( - &memory_allocator, + memory_allocator.clone(), vs.clone(), fs.clone(), &images, @@ -295,7 +295,7 @@ fn main() { swapchain = 
new_swapchain; let (new_pipeline, new_framebuffers) = window_size_dependent_setup( - &memory_allocator, + memory_allocator.clone(), vs.clone(), fs.clone(), &new_images, @@ -436,12 +436,13 @@ fn main() { /// This function is called once during initialization, then again whenever the window is resized. fn window_size_dependent_setup( - memory_allocator: &StandardMemoryAllocator, + memory_allocator: Arc, vs: EntryPoint, fs: EntryPoint, images: &[Arc], render_pass: Arc, ) -> (Arc, Vec>) { + let device = memory_allocator.device().clone(); let extent = images[0].extent(); let depth_buffer = ImageView::new_default( @@ -480,7 +481,6 @@ fn window_size_dependent_setup( // driver to optimize things, at the cost of slower window resizes. // https://computergraphics.stackexchange.com/questions/5742/vulkan-best-way-of-updating-pipeline-viewport let pipeline = { - let device = memory_allocator.device(); let vertex_input_state = [Position::per_vertex(), Normal::per_vertex()] .definition(&vs.info().input_interface) .unwrap(); @@ -498,7 +498,7 @@ fn window_size_dependent_setup( let subpass = Subpass::from(render_pass, 0).unwrap(); GraphicsPipeline::new( - device.clone(), + device, None, GraphicsPipelineCreateInfo { stages: stages.into_iter().collect(), diff --git a/examples/src/bin/tessellation.rs b/examples/src/bin/tessellation.rs index ad014c17..a53295ae 100644 --- a/examples/src/bin/tessellation.rs +++ b/examples/src/bin/tessellation.rs @@ -262,7 +262,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -301,7 +301,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/texture_array/main.rs b/examples/src/bin/texture_array/main.rs index 298a2020..2ceda9a5 100644 --- a/examples/src/bin/texture_array/main.rs +++ b/examples/src/bin/texture_array/main.rs @@ -157,7 +157,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); #[derive(BufferContents, Vertex)] #[repr(C)] @@ -181,7 +181,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() @@ -235,7 +235,7 @@ fn main() { .product::() * array_layers as DeviceSize; let upload_buffer = Buffer::new_slice( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -266,7 +266,7 @@ fn main() { } let image = Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format, diff --git a/examples/src/bin/triangle-v1_3.rs b/examples/src/bin/triangle-v1_3.rs index 4ee2d894..cd550e03 100644 --- a/examples/src/bin/triangle-v1_3.rs +++ b/examples/src/bin/triangle-v1_3.rs @@ -287,7 +287,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); // We now create a buffer that will store the shape of our triangle. 
We use `#[repr(C)]` here // to force rustc to use a defined layout for our data, as the default representation has *no @@ -311,7 +311,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/examples/src/bin/triangle.rs b/examples/src/bin/triangle.rs index ca24a9b6..b3912843 100644 --- a/examples/src/bin/triangle.rs +++ b/examples/src/bin/triangle.rs @@ -256,7 +256,7 @@ fn main() { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); // We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here // to force rustc to use a defined layout for our data, as the default representation has *no @@ -280,7 +280,7 @@ fn main() { }, ]; let vertex_buffer = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/vulkano-util/src/renderer.rs b/vulkano-util/src/renderer.rs index 627ca185..4e0a0a14 100644 --- a/vulkano-util/src/renderer.rs +++ b/vulkano-util/src/renderer.rs @@ -225,7 +225,7 @@ impl VulkanoWindowRenderer { let final_view_image = self.final_views[0].image(); let image = ImageView::new_default( Image::new( - &self.memory_allocator, + self.memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format, diff --git a/vulkano/src/buffer/allocator.rs b/vulkano/src/buffer/allocator.rs index 68c9656d..6f8b4f65 100644 --- a/vulkano/src/buffer/allocator.rs +++ b/vulkano/src/buffer/allocator.rs @@ -134,7 +134,7 @@ const MAX_ARENAS: usize = 32; /// } /// ``` #[derive(Debug)] -pub struct SubbufferAllocator> { +pub struct SubbufferAllocator { state: UnsafeCell>, } @@ -143,7 +143,7 @@ where A: MemoryAllocator, { /// Creates a new `SubbufferAllocator`. - pub fn new(memory_allocator: A, create_info: SubbufferAllocatorCreateInfo) -> Self { + pub fn new(memory_allocator: Arc, create_info: SubbufferAllocatorCreateInfo) -> Self { let SubbufferAllocatorCreateInfo { arena_size, buffer_usage, @@ -278,7 +278,7 @@ where #[derive(Debug)] struct SubbufferAllocatorState { - memory_allocator: A, + memory_allocator: Arc, buffer_usage: BufferUsage, memory_type_filter: MemoryTypeFilter, // The alignment required for the subbuffers. 
@@ -358,7 +358,7 @@ where fn create_arena(&self) -> Result, MemoryAllocatorError> { Buffer::new( - &self.memory_allocator, + self.memory_allocator.clone(), BufferCreateInfo { usage: self.buffer_usage, ..Default::default() @@ -455,7 +455,7 @@ mod tests { #[test] fn reserve() { let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer_allocator = SubbufferAllocator::new( memory_allocator, @@ -473,7 +473,7 @@ mod tests { #[test] fn capacity_increase() { let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer_allocator = SubbufferAllocator::new( memory_allocator, diff --git a/vulkano/src/buffer/mod.rs b/vulkano/src/buffer/mod.rs index 37122b43..efba5370 100644 --- a/vulkano/src/buffer/mod.rs +++ b/vulkano/src/buffer/mod.rs @@ -85,11 +85,11 @@ use crate::{ macros::{vulkan_bitflags, vulkan_enum}, memory::{ allocator::{ - AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAlloc, MemoryAllocator, + AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator, MemoryAllocatorError, }, DedicatedAllocation, ExternalMemoryHandleType, ExternalMemoryHandleTypes, - ExternalMemoryProperties, MemoryRequirements, + ExternalMemoryProperties, MemoryRequirements, ResourceMemory, }, range_map::RangeMap, sync::{future::AccessError, AccessConflict, CurrentAccess, Sharing}, @@ -141,7 +141,7 @@ pub mod view; /// /// # let device: std::sync::Arc = return; /// # let queue: std::sync::Arc = return; -/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return; +/// # let memory_allocator: std::sync::Arc = return; /// # let command_buffer_allocator: vulkano::command_buffer::allocator::StandardCommandBufferAllocator = return; /// # /// // Simple iterator to construct test data. @@ -149,7 +149,7 @@ pub mod view; /// /// // Create a host-accessible buffer initialized with the data. /// let temporary_accessible_buffer = Buffer::from_iter( -/// &memory_allocator, +/// memory_allocator.clone(), /// BufferCreateInfo { /// // Specify that this buffer will be used as a transfer source. /// usage: BufferUsage::TRANSFER_SRC, @@ -167,7 +167,7 @@ pub mod view; /// /// // Create a buffer in device-local memory with enough space for a slice of `10_000` floats. /// let device_local_buffer = Buffer::new_slice::( -/// &memory_allocator, +/// memory_allocator.clone(), /// BufferCreateInfo { /// // Specify use as a storage buffer and transfer destination. /// usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST, @@ -219,7 +219,7 @@ pub enum BufferMemory { /// The buffer is backed by normal memory, bound with [`bind_memory`]. /// /// [`bind_memory`]: RawBuffer::bind_memory - Normal(MemoryAlloc), + Normal(ResourceMemory), /// The buffer is backed by sparse memory, bound with [`bind_sparse`]. /// @@ -237,10 +237,10 @@ impl Buffer { /// /// # Panics /// - /// - Panics if `buffer_info.size` is not zero. + /// - Panics if `create_info.size` is not zero. /// - Panics if the chosen memory type is not host-visible. pub fn from_data( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, create_info: BufferCreateInfo, allocation_info: AllocationCreateInfo, data: T, @@ -267,11 +267,11 @@ impl Buffer { /// /// # Panics /// - /// - Panics if `buffer_info.size` is not zero. 
+ /// - Panics if `create_info.size` is not zero. /// - Panics if the chosen memory type is not host-visible. /// - Panics if `iter` is empty. pub fn from_iter( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, create_info: BufferCreateInfo, allocation_info: AllocationCreateInfo, iter: I, @@ -307,7 +307,7 @@ impl Buffer { /// /// - Panics if `buffer_info.size` is not zero. pub fn new_sized( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, create_info: BufferCreateInfo, allocation_info: AllocationCreateInfo, ) -> Result, Validated> @@ -333,7 +333,7 @@ impl Buffer { /// - Panics if `buffer_info.size` is not zero. /// - Panics if `len` is zero. pub fn new_slice( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, create_info: BufferCreateInfo, allocation_info: AllocationCreateInfo, len: DeviceSize, @@ -352,7 +352,7 @@ impl Buffer { /// - Panics if `buffer_info.size` is not zero. /// - Panics if `len` is zero. pub fn new_unsized( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, create_info: BufferCreateInfo, allocation_info: AllocationCreateInfo, len: DeviceSize, @@ -379,7 +379,7 @@ impl Buffer { /// - Panics if `buffer_info.size` is not zero. /// - Panics if `layout.alignment()` is greater than 64. pub fn new( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, mut create_info: BufferCreateInfo, allocation_info: AllocationCreateInfo, layout: DeviceLayout, @@ -412,6 +412,7 @@ impl Buffer { Some(DedicatedAllocation::Buffer(&raw_buffer)), ) .map_err(BufferAllocateError::AllocateMemory)?; + let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) }; let buffer = raw_buffer.bind_memory(allocation).map_err(|(err, _, _)| { err.map(BufferAllocateError::BindMemory) diff --git a/vulkano/src/buffer/subbuffer.rs b/vulkano/src/buffer/subbuffer.rs index 1293f11a..09c87e1b 100644 --- a/vulkano/src/buffer/subbuffer.rs +++ b/vulkano/src/buffer/subbuffer.rs @@ -301,7 +301,7 @@ where /// 64. [`SubbufferAllocator`] does this automatically. /// /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT - /// [`invalidate_range`]: crate::memory::allocator::MemoryAlloc::invalidate_range + /// [`invalidate_range`]: crate::memory::ResourceMemory::invalidate_range /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size /// [`write`]: Self::write /// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator @@ -312,8 +312,8 @@ where }; let range = if let Some(atom_size) = allocation.atom_size() { - // This works because the suballocators align allocations to the non-coherent atom size - // when the memory is host-visible but not host-coherent. + // This works because the memory allocator must align allocations to the non-coherent + // atom size when the memory is host-visible but not host-coherent. let start = align_down(self.offset, atom_size); let end = cmp::min( align_up(self.offset + self.size, atom_size), @@ -387,7 +387,7 @@ where /// does this automatically. 
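[Editor's sketch, not part of the patch] The atom-size rounding described in the doc comments above can be made concrete with a small example. The `align_down`/`align_up` helpers below are local stand-ins mirroring vulkano's internal ones, and a `non_coherent_atom_size` of 64 is assumed purely for illustration:

// `align` must be a power of two, which Vulkan guarantees for
// `non_coherent_atom_size`.
fn align_down(x: u64, align: u64) -> u64 {
    x & !(align - 1)
}

fn align_up(x: u64, align: u64) -> u64 {
    (x + align - 1) & !(align - 1)
}

fn main() {
    let (offset, size, atom_size) = (100u64, 8u64, 64u64);
    // A write to bytes [100, 108) flushes the enclosing atoms [64, 128).
    assert_eq!(align_down(offset, atom_size), 64);
    assert_eq!(align_up(offset + size, atom_size), 128);
}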
/// /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT - /// [`flush_range`]: crate::memory::allocator::MemoryAlloc::flush_range + /// [`flush_range`]: crate::memory::ResourceMemory::flush_range /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size /// [`read`]: Self::read /// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator @@ -398,8 +398,8 @@ where }; let range = if let Some(atom_size) = allocation.atom_size() { - // This works because the suballocators align allocations to the non-coherent atom size - // when the memory is host-visible but not host-coherent. + // This works because the memory allocator must align allocations to the non-coherent + // atom size when the memory is host-visible but not host-coherent. let start = align_down(self.offset, atom_size); let end = cmp::min( align_up(self.offset + self.size, atom_size), @@ -1142,7 +1142,7 @@ mod tests { AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator, StandardMemoryAllocator, }, - MemoryRequirements, + MemoryRequirements, ResourceMemory, }, }; @@ -1211,10 +1211,10 @@ mod tests { #[test] fn split_at() { let (device, _) = gfx_dev_and_queue!(); - let allocator = StandardMemoryAllocator::new_default(device); + let allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer = Buffer::new_slice::( - &allocator, + allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -1248,7 +1248,7 @@ mod tests { #[test] fn cast_aligned() { let (device, _) = gfx_dev_and_queue!(); - let allocator = StandardMemoryAllocator::new_default(device.clone()); + let allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let raw_buffer = RawBuffer::new( device, @@ -1288,6 +1288,7 @@ mod tests { None, ) .unwrap(); + let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) }; let buffer = Buffer::from_raw(raw_buffer, BufferMemory::Normal(allocation)); let buffer = Subbuffer::from(Arc::new(buffer)); diff --git a/vulkano/src/buffer/sys.rs b/vulkano/src/buffer/sys.rs index 4e8001b1..ab528573 100644 --- a/vulkano/src/buffer/sys.rs +++ b/vulkano/src/buffer/sys.rs @@ -20,9 +20,9 @@ use crate::{ instance::InstanceOwnedDebugWrapper, macros::impl_id_counter, memory::{ - allocator::{AllocationType, DeviceLayout, MemoryAlloc}, + allocator::{AllocationType, DeviceLayout}, is_aligned, DedicatedTo, ExternalMemoryHandleTypes, MemoryAllocateFlags, - MemoryPropertyFlags, MemoryRequirements, + MemoryPropertyFlags, MemoryRequirements, ResourceMemory, }, sync::Sharing, DeviceSize, Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version, @@ -277,8 +277,8 @@ impl RawBuffer { /// Binds device memory to this buffer. 
pub fn bind_memory( self, - allocation: MemoryAlloc, - ) -> Result, RawBuffer, MemoryAlloc)> { + allocation: ResourceMemory, + ) -> Result, RawBuffer, ResourceMemory)> { if let Err(err) = self.validate_bind_memory(&allocation) { return Err((err.into(), self, allocation)); } @@ -287,7 +287,10 @@ impl RawBuffer { .map_err(|(err, buffer, allocation)| (err.into(), buffer, allocation)) } - fn validate_bind_memory(&self, allocation: &MemoryAlloc) -> Result<(), Box> { + fn validate_bind_memory( + &self, + allocation: &ResourceMemory, + ) -> Result<(), Box> { assert_ne!(allocation.allocation_type(), AllocationType::NonLinear); let physical_device = self.device().physical_device(); @@ -497,8 +500,8 @@ impl RawBuffer { #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] pub unsafe fn bind_memory_unchecked( self, - allocation: MemoryAlloc, - ) -> Result { + allocation: ResourceMemory, + ) -> Result { let memory = allocation.device_memory(); let memory_offset = allocation.offset(); diff --git a/vulkano/src/buffer/view.rs b/vulkano/src/buffer/view.rs index aed2fd35..61cbf159 100644 --- a/vulkano/src/buffer/view.rs +++ b/vulkano/src/buffer/view.rs @@ -25,9 +25,9 @@ //! use vulkano::memory::allocator::AllocationCreateInfo; //! //! # let queue: Arc = return; -//! # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return; +//! # let memory_allocator: Arc = return; //! let buffer = Buffer::new_slice::( -//! &memory_allocator, +//! memory_allocator.clone(), //! BufferCreateInfo { //! usage: BufferUsage::STORAGE_TEXEL_BUFFER, //! ..Default::default() @@ -456,15 +456,16 @@ mod tests { format::Format, memory::allocator::{AllocationCreateInfo, StandardMemoryAllocator}, }; + use std::sync::Arc; #[test] fn create_uniform() { // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer = Buffer::new_slice::<[u8; 4]>( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::UNIFORM_TEXEL_BUFFER, ..Default::default() @@ -488,10 +489,10 @@ mod tests { fn create_storage() { // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer = Buffer::new_slice::<[u8; 4]>( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_TEXEL_BUFFER, ..Default::default() @@ -514,10 +515,10 @@ mod tests { fn create_storage_atomic() { // `VK_FORMAT_R32_UINT` guaranteed to be a supported format for atomics let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer = Buffer::new_slice::( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_TEXEL_BUFFER, ..Default::default() @@ -540,10 +541,10 @@ mod tests { fn wrong_usage() { // `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer = Buffer::new_slice::<[u8; 4]>( - &memory_allocator, + memory_allocator, 
BufferCreateInfo { usage: BufferUsage::TRANSFER_DST, // Dummy value ..Default::default() @@ -568,10 +569,10 @@ mod tests { #[test] fn unsupported_format() { let (device, _) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buffer = Buffer::new_slice::<[f64; 4]>( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::UNIFORM_TEXEL_BUFFER | BufferUsage::STORAGE_TEXEL_BUFFER, ..Default::default() diff --git a/vulkano/src/command_buffer/auto/mod.rs b/vulkano/src/command_buffer/auto/mod.rs index d7058bd7..c84f08d4 100644 --- a/vulkano/src/command_buffer/auto/mod.rs +++ b/vulkano/src/command_buffer/auto/mod.rs @@ -339,6 +339,7 @@ mod tests { shader::ShaderStages, sync::GpuFuture, }; + use std::sync::Arc; #[test] fn basic_creation() { @@ -376,10 +377,10 @@ mod tests { .unwrap(); let queue = queues.next().unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let source = Buffer::from_iter( - &memory_allocator, + memory_allocator.clone(), BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC, ..Default::default() @@ -394,7 +395,7 @@ mod tests { .unwrap(); let destination = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_DST, ..Default::default() @@ -506,9 +507,9 @@ mod tests { fn buffer_self_copy_overlapping() { let (device, queue) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let source = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC | BufferUsage::TRANSFER_DST, ..Default::default() @@ -561,9 +562,9 @@ mod tests { fn buffer_self_copy_not_overlapping() { let (device, queue) = gfx_dev_and_queue!(); - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let source = Buffer::from_iter( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_SRC | BufferUsage::TRANSFER_DST, ..Default::default() @@ -613,10 +614,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); // Create a tiny test buffer let buffer = Buffer::from_data( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::TRANSFER_DST, ..Default::default() @@ -712,9 +713,9 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let buf = Buffer::from_data( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::VERTEX_BUFFER, ..Default::default() diff --git a/vulkano/src/image/mod.rs b/vulkano/src/image/mod.rs index 091ecb97..c0aa45d5 100644 --- a/vulkano/src/image/mod.rs +++ b/vulkano/src/image/mod.rs @@ -49,7 +49,7 @@ //! types. //! - Binding [imported] `DeviceMemory`. //! -//! You can [create a `MemoryAlloc` from `DeviceMemory`] if you want to bind its own block of +//! You can [create a `ResourceMemory` from `DeviceMemory`] if you want to bind its own block of //! 
memory to an image. //! //! [`ImageView`]: crate::image::view::ImageView @@ -59,7 +59,7 @@ //! [`DeviceMemory`]: crate::memory::DeviceMemory //! [allocated yourself]: crate::memory::DeviceMemory::allocate //! [imported]: crate::memory::DeviceMemory::import -//! [create a `MemoryAlloc` from `DeviceMemory`]: MemoryAlloc::new +//! [create a `ResourceMemory` from `DeviceMemory`]: ResourceMemory::new_dedicated pub use self::{aspect::*, layout::*, sys::ImageCreateInfo, usage::*}; use self::{sys::RawImage, view::ImageViewType}; @@ -68,9 +68,9 @@ use crate::{ format::{Format, FormatFeatures}, macros::{vulkan_bitflags, vulkan_bitflags_enum, vulkan_enum}, memory::{ - allocator::{AllocationCreateInfo, MemoryAlloc, MemoryAllocator, MemoryAllocatorError}, + allocator::{AllocationCreateInfo, MemoryAllocator, MemoryAllocatorError}, DedicatedAllocation, ExternalMemoryHandleType, ExternalMemoryHandleTypes, - ExternalMemoryProperties, MemoryRequirements, + ExternalMemoryProperties, MemoryRequirements, ResourceMemory, }, range_map::RangeMap, swapchain::Swapchain, @@ -128,7 +128,7 @@ pub enum ImageMemory { /// The image is backed by normal memory, bound with [`bind_memory`]. /// /// [`bind_memory`]: RawImage::bind_memory - Normal(SmallVec<[MemoryAlloc; 4]>), + Normal(SmallVec<[ResourceMemory; 4]>), /// The image is backed by sparse memory, bound with [`bind_sparse`]. /// @@ -145,7 +145,7 @@ pub enum ImageMemory { impl Image { /// Creates a new uninitialized `Image`. pub fn new( - allocator: &(impl MemoryAllocator + ?Sized), + allocator: Arc, create_info: ImageCreateInfo, allocation_info: AllocationCreateInfo, ) -> Result, Validated> { @@ -168,6 +168,7 @@ impl Image { Some(DedicatedAllocation::Image(&raw_image)), ) .map_err(ImageAllocateError::AllocateMemory)?; + let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) }; let image = raw_image.bind_memory([allocation]).map_err(|(err, _, _)| { err.map(ImageAllocateError::BindMemory) diff --git a/vulkano/src/image/sampler/ycbcr.rs b/vulkano/src/image/sampler/ycbcr.rs index 55c926b1..38365f46 100644 --- a/vulkano/src/image/sampler/ycbcr.rs +++ b/vulkano/src/image/sampler/ycbcr.rs @@ -48,7 +48,7 @@ //! }; //! //! # let device: std::sync::Arc = return; -//! # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return; +//! # let memory_allocator: std::sync::Arc = return; //! # let descriptor_set_allocator: vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator = return; //! # //! let conversion = SamplerYcbcrConversion::new(device.clone(), SamplerYcbcrConversionCreateInfo { @@ -82,7 +82,7 @@ //! .unwrap(); //! //! let image = Image::new( -//! &memory_allocator, +//! memory_allocator.clone(), //! ImageCreateInfo { //! image_type: ImageType::Dim2d, //! 
format: Format::G8_B8_R8_3PLANE_420_UNORM, diff --git a/vulkano/src/image/sys.rs b/vulkano/src/image/sys.rs index c953dde5..d6a0e57a 100644 --- a/vulkano/src/image/sys.rs +++ b/vulkano/src/image/sys.rs @@ -33,9 +33,9 @@ use crate::{ instance::InstanceOwnedDebugWrapper, macros::impl_id_counter, memory::{ - allocator::{AllocationType, DeviceLayout, MemoryAlloc}, + allocator::{AllocationType, DeviceLayout}, is_aligned, DedicatedTo, ExternalMemoryHandleTypes, MemoryPropertyFlags, - MemoryRequirements, + MemoryRequirements, ResourceMemory, }, sync::Sharing, Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version, VulkanError, @@ -707,13 +707,13 @@ impl RawImage { /// `allocations` must contain exactly `self.drm_format_modifier().unwrap().1` elements. pub fn bind_memory( self, - allocations: impl IntoIterator, + allocations: impl IntoIterator, ) -> Result< Image, ( Validated, RawImage, - impl ExactSizeIterator, + impl ExactSizeIterator, ), > { let allocations: SmallVec<[_; 4]> = allocations.into_iter().collect(); @@ -736,7 +736,7 @@ impl RawImage { fn validate_bind_memory( &self, - allocations: &[MemoryAlloc], + allocations: &[ResourceMemory], ) -> Result<(), Box> { let physical_device = self.device().physical_device(); @@ -1072,13 +1072,13 @@ impl RawImage { #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] pub unsafe fn bind_memory_unchecked( self, - allocations: impl IntoIterator, + allocations: impl IntoIterator, ) -> Result< Image, ( VulkanError, RawImage, - impl ExactSizeIterator, + impl ExactSizeIterator, ), > { let allocations: SmallVec<[_; 4]> = allocations.into_iter().collect(); diff --git a/vulkano/src/memory/allocator/mod.rs b/vulkano/src/memory/allocator/mod.rs index e21e9389..65ced9ea 100644 --- a/vulkano/src/memory/allocator/mod.rs +++ b/vulkano/src/memory/allocator/mod.rs @@ -10,10 +10,10 @@ //! In Vulkan, suballocation of [`DeviceMemory`] is left to the application, because every //! application has slightly different needs and one can not incorporate an allocator into the //! driver that would perform well in all cases. Vulkano stays true to this sentiment, but aims to -//! reduce the burden on the user as much as possible. You have a toolbox of configurable -//! [suballocators] to choose from that cover all allocation algorithms, which you can compose into -//! any kind of [hierarchy] you wish. This way you have maximum flexibility while still only using -//! a few `DeviceMemory` blocks and not writing any of the very error-prone code. +//! reduce the burden on the user as much as possible. You have a toolbox of [suballocators] to +//! choose from that cover all allocation algorithms, which you can compose into any kind of +//! [hierarchy] you wish. This way you have maximum flexibility while still only using a few +//! `DeviceMemory` blocks and not writing any of the very error-prone code. //! //! If you just want to allocate memory and don't have any special needs, look no further than the //! [`StandardMemoryAllocator`]. 
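Because resource constructors now take the allocator by `Arc` (so that resources can hand their allocations back for deallocation later), the typical setup is to create one allocator up front and clone the handle wherever resources are created, as the reworked examples throughout this patch do. A sketch:

```rust
# use std::sync::Arc;
# let device: Arc<vulkano::device::Device> = return;
use vulkano::memory::allocator::StandardMemoryAllocator;

// One allocator for the whole application; cloning the `Arc` is cheap, and each
// clone keeps the allocator (and thus its `DeviceMemory` blocks) alive.
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let allocator_for_buffers = memory_allocator.clone();
let allocator_for_images = memory_allocator.clone();
```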
@@ -223,8 +223,8 @@ use self::array_vec::ArrayVec;
 pub use self::{
     layout::DeviceLayout,
     suballocator::{
-        AllocationType, BuddyAllocator, BumpAllocator, FreeListAllocator, MemoryAlloc,
-        PoolAllocator, SuballocationCreateInfo, Suballocator, SuballocatorError,
+        AllocationType, BuddyAllocator, BumpAllocator, FreeListAllocator, Suballocation,
+        Suballocator, SuballocatorError,
     },
 };
 use super::{
@@ -242,8 +242,9 @@ use ash::vk::{MAX_MEMORY_HEAPS, MAX_MEMORY_TYPES};
 use parking_lot::RwLock;
 use std::{
     error::Error,
-    fmt::{Display, Error as FmtError, Formatter},
+    fmt::{Debug, Display, Error as FmtError, Formatter},
     ops::BitOr,
+    ptr,
     sync::Arc,
 };
@@ -253,7 +254,11 @@ const M: DeviceSize = 1024 * K;
 const G: DeviceSize = 1024 * M;

 /// General-purpose memory allocators which allocate from any memory type dynamically as needed.
-pub unsafe trait MemoryAllocator: DeviceOwned {
+///
+/// # Safety
+///
+/// TODO
+pub unsafe trait MemoryAllocator: DeviceOwned + Send + Sync + 'static {
     /// Finds the most suitable memory type index in `memory_type_bits` using the given `filter`.
     /// Returns [`None`] if the requirements are too strict and no memory type is able to satisfy
     /// them.
@@ -269,12 +274,17 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
     ///
     /// - `memory_type_index` - The index of the memory type to allocate from.
     ///
+    /// - `layout` - The layout of the allocation.
+    ///
+    /// - `allocation_type` - The type of resources that can be bound to the allocation.
+    ///
     /// - `never_allocate` - If `true` then the allocator should never allocate `DeviceMemory`,
     ///   instead only suballocate from existing blocks.
     fn allocate_from_type(
         &self,
         memory_type_index: u32,
-        create_info: SuballocationCreateInfo,
+        layout: DeviceLayout,
+        allocation_type: AllocationType,
         never_allocate: bool,
     ) -> Result<MemoryAlloc, MemoryAllocatorError>;

@@ -319,14 +329,27 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
         dedicated_allocation: Option<DedicatedAllocation<'_>>,
     ) -> Result<MemoryAlloc, MemoryAllocatorError>;

-    /// Creates a root allocation/dedicated allocation.
+    /// Creates an allocation with a whole device memory block dedicated to it.
     fn allocate_dedicated(
         &self,
         memory_type_index: u32,
         allocation_size: DeviceSize,
         dedicated_allocation: Option<DedicatedAllocation<'_>>,
         export_handle_types: ExternalMemoryHandleTypes,
-    ) -> Result<MemoryAlloc, Validated<VulkanError>>;
+    ) -> Result<MemoryAlloc, MemoryAllocatorError>;
+
+    /// Deallocates the given `allocation`.
+    ///
+    /// # Safety
+    ///
+    /// - `allocation` must refer to a **currently allocated** allocation of `self`.
+    unsafe fn deallocate(&self, allocation: MemoryAlloc);
+}
+
+impl Debug for dyn MemoryAllocator {
+    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
+        f.debug_struct("MemoryAllocator").finish_non_exhaustive()
+    }
 }

 /// Describes what memory property flags are required, preferred and not preferred when picking a
@@ -343,12 +366,12 @@
 /// #     memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
 /// # };
 /// #
-/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
+/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
 /// # let format = return;
 /// # let extent = return;
 /// #
 /// let texture = Image::new(
-///     &memory_allocator,
+///     memory_allocator.clone(),
 ///     ImageCreateInfo {
 ///         format,
 ///         extent,
@@ -373,10 +396,10 @@
 /// #     memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
 /// # };
 /// #
-/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
+/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
 /// #
 /// let staging_buffer = Buffer::new_sized(
-///     &memory_allocator,
+///     memory_allocator.clone(),
 ///     BufferCreateInfo {
 ///         usage: BufferUsage::TRANSFER_SRC,
 ///         ..Default::default()
@@ -401,10 +424,10 @@
 /// #     memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
 /// # };
 /// #
-/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
+/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
 /// #
 /// let uniform_buffer = Buffer::new_sized(
-///     &memory_allocator,
+///     memory_allocator.clone(),
 ///     BufferCreateInfo {
 ///         usage: BufferUsage::UNIFORM_BUFFER,
 ///         ..Default::default()
@@ -428,10 +451,10 @@
 /// #     memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
 /// # };
 /// #
-/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
+/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
 /// #
 /// let readback_buffer = Buffer::new_sized(
-///     &memory_allocator,
+///     memory_allocator.clone(),
 ///     BufferCreateInfo {
 ///         usage: BufferUsage::TRANSFER_DST,
 ///         ..Default::default()
@@ -640,7 +663,6 @@ impl Default for AllocationCreateInfo {

 /// Describes whether allocating [`DeviceMemory`] is desired.
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
-#[non_exhaustive]
 pub enum MemoryAllocatePreference {
     /// There is no known preference, let the allocator decide.
     Unknown,
@@ -658,6 +680,38 @@ pub enum MemoryAllocatePreference {
     AlwaysAllocate,
 }

+/// An allocation made using a [memory allocator].
+///
+/// [memory allocator]: MemoryAllocator
+#[derive(Clone, Debug)]
+pub struct MemoryAlloc {
+    /// The underlying block of device memory.
+    pub device_memory: Arc<DeviceMemory>,
+
+    /// The suballocation within the device memory block, or [`None`] if this is a dedicated
+    /// allocation.
+    pub suballocation: Option<Suballocation>,
+
+    /// The type of resources that can be bound to this memory block. This will be exactly equal to
+    /// the requested allocation type.
+    ///
+    /// For dedicated allocations it doesn't matter what this is, as there aren't going to be any
+    /// neighboring suballocations. Therefore the allocator implementation is advised to always set
+    /// this to [`AllocationType::Unknown`] in that case for maximum flexibility.
+    pub allocation_type: AllocationType,
+
+    /// An opaque handle identifying the allocation inside the allocator.
+ pub allocation_handle: AllocationHandle, +} + +/// An opaque handle identifying an allocation inside an allocator. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +#[repr(transparent)] +pub struct AllocationHandle(pub *mut ()); + +unsafe impl Send for AllocationHandle {} +unsafe impl Sync for AllocationHandle {} + /// Error that can be returned when creating an [allocation] using a [memory allocator]. /// /// [allocation]: MemoryAlloc @@ -690,12 +744,6 @@ pub enum MemoryAllocatorError { /// This is returned when using [`MemoryAllocatePreference::NeverAllocate`] and the allocation /// size exceeded the block size for all heaps of suitable memory types. BlockSizeExceeded, - - /// The block size for the suballocator was exceeded. - /// - /// This is returned when using [`GenericMemoryAllocator>>`] if - /// the allocation size exceeded `BLOCK_SIZE`. - SuballocatorBlockSizeExceeded, } impl Error for MemoryAllocatorError { @@ -720,9 +768,6 @@ impl Display for MemoryAllocatorError { "the allocation size was greater than the block size for all heaps of suitable \ memory types and dedicated allocations were explicitly forbidden" } - Self::SuballocatorBlockSizeExceeded => { - "the allocation size was greater than the suballocator's block size" - } }; f.write_str(msg) @@ -735,8 +780,8 @@ impl Display for MemoryAllocatorError { /// not suited to the task. /// /// See also [`GenericMemoryAllocator`] for details about the allocation algorithm, and -/// [`FreeListAllocator`] for details about the suballocation algorithm and example usage. -pub type StandardMemoryAllocator = GenericMemoryAllocator>; +/// [`FreeListAllocator`] for details about the suballocation algorithm. +pub type StandardMemoryAllocator = GenericMemoryAllocator; impl StandardMemoryAllocator { /// Creates a new `StandardMemoryAllocator` with default configuration. @@ -815,30 +860,31 @@ impl StandardMemoryAllocator { /// [suballocate]: Suballocator /// [the `MemoryAllocator` implementation]: Self#impl-MemoryAllocator-for-GenericMemoryAllocator #[derive(Debug)] -pub struct GenericMemoryAllocator { +pub struct GenericMemoryAllocator { device: InstanceOwnedDebugWrapper>, + buffer_image_granularity: DeviceAlignment, // Each memory type has a pool of `DeviceMemory` blocks. pools: ArrayVec, MAX_MEMORY_TYPES>, // Each memory heap has its own block size. block_sizes: ArrayVec, - allocation_type: AllocationType, + // Global mask of memory types. + memory_type_bits: u32, dedicated_allocation: bool, export_handle_types: ArrayVec, flags: MemoryAllocateFlags, - // Global mask of memory types. - memory_type_bits: u32, // How many `DeviceMemory` allocations should be allowed before restricting them. max_allocations: u32, } #[derive(Debug)] struct Pool { - blocks: RwLock>, + blocks: RwLock>>>, // This is cached here for faster access, so we don't need to hop through 3 pointers. memory_type: ash::vk::MemoryType, + atom_size: DeviceAlignment, } -impl GenericMemoryAllocator { +impl GenericMemoryAllocator { // This is a false-positive, we only use this const for static initialization. #[allow(clippy::declare_interior_mutable_const)] const EMPTY_POOL: Pool = Pool { @@ -847,6 +893,7 @@ impl GenericMemoryAllocator { property_flags: ash::vk::MemoryPropertyFlags::empty(), heap_index: 0, }, + atom_size: DeviceAlignment::MIN, }; /// Creates a new `GenericMemoryAllocator` using the provided suballocator `S` for @@ -857,7 +904,6 @@ impl GenericMemoryAllocator { /// - Panics if `create_info.block_sizes` is not sorted by threshold. 
/// - Panics if `create_info.block_sizes` contains duplicate thresholds. /// - Panics if `create_info.block_sizes` does not contain a baseline threshold of `0`. - /// - Panics if the block size for a heap exceeds the size of the heap. pub fn new( device: Arc, create_info: GenericMemoryAllocatorCreateInfo<'_, '_>, @@ -886,13 +932,17 @@ impl GenericMemoryAllocator { let GenericMemoryAllocatorCreateInfo { block_sizes, memory_type_bits, - allocation_type, dedicated_allocation, export_handle_types, mut device_address, _ne: _, } = create_info; + let buffer_image_granularity = device + .physical_device() + .properties() + .buffer_image_granularity; + let MemoryProperties { memory_types, memory_heaps, @@ -900,11 +950,24 @@ impl GenericMemoryAllocator { let mut pools = ArrayVec::new(memory_types.len(), [Self::EMPTY_POOL; MAX_MEMORY_TYPES]); - for (i, memory_type) in memory_types.iter().enumerate() { + for ( + i, + &MemoryType { + property_flags, + heap_index, + }, + ) in memory_types.iter().enumerate() + { pools[i].memory_type = ash::vk::MemoryType { - property_flags: memory_type.property_flags.into(), - heap_index: memory_type.heap_index, + property_flags: property_flags.into(), + heap_index, }; + + if property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE) + && !property_flags.intersects(MemoryPropertyFlags::HOST_COHERENT) + { + pools[i].atom_size = device.physical_device().properties().non_coherent_atom_size; + } } let block_sizes = { @@ -916,9 +979,6 @@ impl GenericMemoryAllocator { Err(idx) => idx.saturating_sub(1), }; sizes[i] = block_sizes[idx].1; - - // VUID-vkAllocateMemory-pAllocateInfo-01713 - assert!(sizes[i] <= memory_heap.size); } sizes @@ -955,9 +1015,9 @@ impl GenericMemoryAllocator { GenericMemoryAllocator { device: InstanceOwnedDebugWrapper(device), + buffer_image_granularity, pools, block_sizes, - allocation_type, dedicated_allocation, export_handle_types, flags, @@ -965,9 +1025,50 @@ impl GenericMemoryAllocator { max_allocations, } } + + #[cold] + fn allocate_device_memory( + &self, + memory_type_index: u32, + allocation_size: DeviceSize, + dedicated_allocation: Option>, + export_handle_types: ExternalMemoryHandleTypes, + ) -> Result, Validated> { + let mut memory = DeviceMemory::allocate( + self.device.clone(), + MemoryAllocateInfo { + allocation_size, + memory_type_index, + dedicated_allocation, + export_handle_types, + flags: self.flags, + ..Default::default() + }, + )?; + + if self.pools[memory_type_index as usize] + .memory_type + .property_flags + .intersects(ash::vk::MemoryPropertyFlags::HOST_VISIBLE) + { + // SAFETY: + // - We checked that the memory is host-visible. + // - The memory can't be mapped already, because we just allocated it. + // - Mapping the whole range is always valid. + unsafe { + memory.map_unchecked(MemoryMapInfo { + offset: 0, + size: memory.allocation_size(), + _ne: crate::NonExhaustive(()), + })?; + } + } + + Ok(Arc::new(memory)) + } } -unsafe impl MemoryAllocator for GenericMemoryAllocator { +unsafe impl MemoryAllocator for GenericMemoryAllocator { fn find_memory_type_index( &self, memory_type_bits: u32, @@ -1001,6 +1102,10 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { /// /// - `memory_type_index` - The index of the memory type to allocate from. /// + /// - `layout` - The layout of the allocation. + /// + /// - `allocation_type` - The type of resources that can be bound to the allocation. 
+ /// /// - `never_allocate` - If `true` then the allocator should never allocate `DeviceMemory`, /// instead only suballocate from existing blocks. /// @@ -1026,15 +1131,10 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { fn allocate_from_type( &self, memory_type_index: u32, - create_info: SuballocationCreateInfo, + mut layout: DeviceLayout, + allocation_type: AllocationType, never_allocate: bool, ) -> Result { - let SuballocationCreateInfo { - layout, - allocation_type: _, - _ne: _, - } = create_info; - let size = layout.size(); let pool = &self.pools[memory_type_index as usize]; let block_size = self.block_sizes[pool.memory_type.heap_index as usize]; @@ -1043,6 +1143,8 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { return Err(MemoryAllocatorError::BlockSizeExceeded); } + layout = layout.align_to(pool.atom_size).unwrap(); + let mut blocks = if S::IS_BLOCKING { // If the allocation algorithm needs to block, then there's no point in trying to avoid // locks here either. In that case the best strategy is to take full advantage of it by @@ -1052,15 +1154,15 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { // huge amount of memory unless you configure your block sizes properly! let mut blocks = pool.blocks.write(); - blocks.sort_by_key(Suballocator::free_size); - let (Ok(idx) | Err(idx)) = blocks.binary_search_by_key(&size, Suballocator::free_size); + blocks.sort_by_key(|block| block.free_size()); + let (Ok(idx) | Err(idx)) = + blocks.binary_search_by_key(&size, |block| block.free_size()); + for block in &blocks[idx..] { - match block.allocate(create_info.clone()) { - Ok(allocation) => return Ok(allocation), - Err(SuballocatorError::BlockSizeExceeded) => { - return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded); - } - Err(_) => {} + if let Ok(allocation) = + block.allocate(layout, allocation_type, self.buffer_image_granularity) + { + return Ok(allocation); } } @@ -1076,33 +1178,26 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { // has the same performance as trying to allocate. let blocks = pool.blocks.read(); + // Search in reverse order because we always append new blocks at the end. for block in blocks.iter().rev() { - match block.allocate(create_info.clone()) { - Ok(allocation) => return Ok(allocation), - // This can happen when using the `PoolAllocator` if the allocation - // size is greater than `BLOCK_SIZE`. - Err(SuballocatorError::BlockSizeExceeded) => { - return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded); - } - Err(_) => {} + if let Ok(allocation) = + block.allocate(layout, allocation_type, self.buffer_image_granularity) + { + return Ok(allocation); } } let len = blocks.len(); drop(blocks); let blocks = pool.blocks.write(); + if blocks.len() > len { // Another thread beat us to it and inserted a fresh block, try to suballocate it. - match blocks[len].allocate(create_info.clone()) { - Ok(allocation) => return Ok(allocation), - // This can happen if this is the first block that was inserted and when using - // the `PoolAllocator` if the allocation size is greater than - // `BLOCK_SIZE`. - Err(SuballocatorError::BlockSizeExceeded) => { - return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded); - } - Err(_) => {} + if let Ok(allocation) = + blocks[len].allocate(layout, allocation_type, self.buffer_image_granularity) + { + return Ok(allocation); } } @@ -1111,11 +1206,13 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { // For bump allocators, first do a garbage sweep and try to allocate again. 
if S::NEEDS_CLEANUP { - blocks.iter_mut().for_each(Suballocator::cleanup); - blocks.sort_unstable_by_key(Suballocator::free_size); + blocks.iter_mut().for_each(|block| block.cleanup()); + blocks.sort_unstable_by_key(|block| block.free_size()); if let Some(block) = blocks.last() { - if let Ok(allocation) = block.allocate(create_info.clone()) { + if let Ok(allocation) = + block.allocate(layout, allocation_type, self.buffer_image_granularity) + { return Ok(allocation); } } @@ -1137,14 +1234,14 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { loop { let allocation_size = block_size >> i; - match self.allocate_dedicated( + match self.allocate_device_memory( memory_type_index, allocation_size, None, export_handle_types, ) { - Ok(allocation) => { - break S::new(allocation); + Ok(device_memory) => { + break Block::new(device_memory); } // Retry up to 3 times, halving the allocation size each time so long as the // resulting size is still large enough. @@ -1161,18 +1258,13 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { blocks.push(block); let block = blocks.last().unwrap(); - match block.allocate(create_info) { + match block.allocate(layout, allocation_type, self.buffer_image_granularity) { Ok(allocation) => Ok(allocation), // This can't happen as we always allocate a block of sufficient size. Err(SuballocatorError::OutOfRegionMemory) => unreachable!(), // This can't happen as the block is fresher than Febreze and we're still holding an // exclusive lock. Err(SuballocatorError::FragmentedRegion) => unreachable!(), - // This can happen if this is the first block that was inserted and when using the - // `PoolAllocator` if the allocation size is greater than `BLOCK_SIZE`. - Err(SuballocatorError::BlockSizeExceeded) => { - Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded) - } } } @@ -1258,12 +1350,6 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { _ne: _, } = create_info; - let create_info = SuballocationCreateInfo { - layout, - allocation_type, - _ne: crate::NonExhaustive(()), - }; - let size = layout.size(); let mut memory_type_index = self @@ -1295,7 +1381,6 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { dedicated_allocation, export_handle_types, ) - .map_err(MemoryAllocatorError::AllocateDeviceMemory) } else { if size > block_size / 2 { prefers_dedicated_allocation = true; @@ -1313,30 +1398,34 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { dedicated_allocation, export_handle_types, ) - .map_err(MemoryAllocatorError::AllocateDeviceMemory) // Fall back to suballocation. .or_else(|err| { self.allocate_from_type( memory_type_index, - create_info.clone(), + layout, + allocation_type, true, // A dedicated allocation already failed. ) .map_err(|_| err) }) } else { - self.allocate_from_type(memory_type_index, create_info.clone(), false) - // Fall back to dedicated allocation. It is possible that the 1/8 - // block size tried was greater than the allocation size, so - // there's hope. - .or_else(|_| { - self.allocate_dedicated( - memory_type_index, - size, - dedicated_allocation, - export_handle_types, - ) - .map_err(MemoryAllocatorError::AllocateDeviceMemory) - }) + self.allocate_from_type( + memory_type_index, + layout, + allocation_type, + false, + ) + // Fall back to dedicated allocation. It is possible that the 1/8 + // block size tried was greater than the allocation size, so + // there's hope. 
+ .or_else(|_| { + self.allocate_dedicated( + memory_type_index, + size, + dedicated_allocation, + export_handle_types, + ) + }) } } } @@ -1345,24 +1434,18 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { return Err(MemoryAllocatorError::DedicatedAllocationRequired); } - self.allocate_from_type(memory_type_index, create_info.clone(), true) + self.allocate_from_type(memory_type_index, layout, allocation_type, true) } - MemoryAllocatePreference::AlwaysAllocate => self - .allocate_dedicated( - memory_type_index, - size, - dedicated_allocation, - export_handle_types, - ) - .map_err(MemoryAllocatorError::AllocateDeviceMemory), + MemoryAllocatePreference::AlwaysAllocate => self.allocate_dedicated( + memory_type_index, + size, + dedicated_allocation, + export_handle_types, + ), }; match res { Ok(allocation) => return Ok(allocation), - // This is not recoverable. - Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded) => { - return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded); - } // Try a different memory type. Err(err) => { memory_type_bits &= !(1 << memory_type_index); @@ -1381,43 +1464,54 @@ unsafe impl MemoryAllocator for GenericMemoryAllocator { allocation_size: DeviceSize, dedicated_allocation: Option>, export_handle_types: ExternalMemoryHandleTypes, - ) -> Result> { - let mut memory = DeviceMemory::allocate( - self.device.clone(), - MemoryAllocateInfo { - allocation_size, + ) -> Result { + let device_memory = self + .allocate_device_memory( memory_type_index, + allocation_size, dedicated_allocation, export_handle_types, - flags: self.flags, - ..Default::default() - }, - )?; + ) + .map_err(MemoryAllocatorError::AllocateDeviceMemory)?; - if self.pools[memory_type_index as usize] - .memory_type - .property_flags - .intersects(ash::vk::MemoryPropertyFlags::HOST_VISIBLE) - { - // SAFETY: - // - We checked that the memory is host-visible. - // - The memory can't be mapped already, because we just allocated it. - // - Mapping the whole range is always valid. - unsafe { - memory.map_unchecked(MemoryMapInfo { - offset: 0, - size: memory.allocation_size(), - _ne: crate::NonExhaustive(()), - })?; + Ok(MemoryAlloc { + device_memory, + suballocation: None, + allocation_type: AllocationType::Unknown, + allocation_handle: AllocationHandle(ptr::null_mut()), + }) + } + + unsafe fn deallocate(&self, allocation: MemoryAlloc) { + if let Some(suballocation) = allocation.suballocation { + let block_ptr = allocation.allocation_handle.0 as *const Block; + + // TODO: Maybe do a similar check for dedicated blocks. + #[cfg(debug_assertions)] + { + let memory_type_index = allocation.device_memory.memory_type_index(); + let pool = self.pools[memory_type_index as usize].blocks.read(); + + assert!( + pool.iter() + .any(|block| &**block as *const Block == block_ptr), + "attempted to deallocate a memory block that does not belong to this allocator", + ); } + + // SAFETY: The caller must guarantee that `allocation` refers to one allocated by + // `self`, therefore `block_ptr` must be the same one we gave out on allocation. We + // know that this pointer must be valid, because all blocks are boxed and pinned in + // memory and because a block isn't dropped until the allocator itself is dropped, at + // which point it would be impossible to call this method. We also know that it must be + // valid to create a reference to the block, because we only ever access it via shared + // references. 
+            let block = &*block_ptr;
+
+            // SAFETY: The caller must guarantee that `allocation` refers to a currently allocated
+            // allocation of `self`.
+            block.deallocate(suballocation);
         }
-
-        let mut allocation = MemoryAlloc::new(memory);
-
-        // SAFETY: The memory is freshly allocated.
-        unsafe { allocation.set_allocation_type(self.allocation_type) };
-
-        Ok(allocation)
     }
 }

@@ -1433,10 +1527,11 @@ unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
     fn allocate_from_type(
         &self,
         memory_type_index: u32,
-        create_info: SuballocationCreateInfo,
+        layout: DeviceLayout,
+        allocation_type: AllocationType,
         never_allocate: bool,
     ) -> Result<MemoryAlloc, MemoryAllocatorError> {
-        (**self).allocate_from_type(memory_type_index, create_info, never_allocate)
+        (**self).allocate_from_type(memory_type_index, layout, allocation_type, never_allocate)
     }

     fn allocate(
@@ -1460,7 +1555,7 @@ unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
         allocation_size: DeviceSize,
         dedicated_allocation: Option<DedicatedAllocation<'_>>,
         export_handle_types: ExternalMemoryHandleTypes,
-    ) -> Result<MemoryAlloc, Validated<VulkanError>> {
+    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
         (**self).allocate_dedicated(
             memory_type_index,
             allocation_size,
             dedicated_allocation,
             export_handle_types,
         )
     }
+
+    unsafe fn deallocate(&self, allocation: MemoryAlloc) {
+        (**self).deallocate(allocation)
+    }
 }

-unsafe impl<S: Suballocator> DeviceOwned for GenericMemoryAllocator<S> {
+unsafe impl<S> DeviceOwned for GenericMemoryAllocator<S> {
     fn device(&self) -> &Arc<Device> {
         &self.device
     }
 }

+#[derive(Debug)]
+struct Block<S> {
+    device_memory: Arc<DeviceMemory>,
+    suballocator: S,
+}
+
+impl<S: Suballocator> Block<S> {
+    fn new(device_memory: Arc<DeviceMemory>) -> Box<Self> {
+        let suballocator = S::new(0, device_memory.allocation_size());
+
+        Box::new(Block {
+            device_memory,
+            suballocator,
+        })
+    }
+
+    fn allocate(
+        &self,
+        layout: DeviceLayout,
+        allocation_type: AllocationType,
+        buffer_image_granularity: DeviceAlignment,
+    ) -> Result<MemoryAlloc, SuballocatorError> {
+        let suballocation =
+            self.suballocator
+                .allocate(layout, allocation_type, buffer_image_granularity)?;
+
+        Ok(MemoryAlloc {
+            device_memory: self.device_memory.clone(),
+            suballocation: Some(suballocation),
+            allocation_type,
+            allocation_handle: AllocationHandle(self as *const Block<S> as _),
+        })
+    }
+
+    unsafe fn deallocate(&self, suballocation: Suballocation) {
+        self.suballocator.deallocate(suballocation)
+    }
+
+    fn free_size(&self) -> DeviceSize {
+        self.suballocator.free_size()
+    }
+
+    fn cleanup(&mut self) {
+        self.suballocator.cleanup();
+    }
+}
+
 /// Parameters to create a new [`GenericMemoryAllocator`].
 #[derive(Clone, Debug)]
 pub struct GenericMemoryAllocatorCreateInfo<'b, 'e> {
@@ -1513,40 +1659,25 @@ pub struct GenericMemoryAllocatorCreateInfo<'b, 'e> {
     /// [`PROTECTED`]: MemoryPropertyFlags::DEVICE_COHERENT
     pub memory_type_bits: u32,

-    /// The allocation type that should be used for root allocations.
-    ///
-    /// You only need to worry about this if you're using [`PoolAllocator`] as the suballocator, as
-    /// all suballocations that the pool allocator makes inherit their allocation type from the
-    /// parent allocation. For the [`FreeListAllocator`] and the [`BuddyAllocator`] this must be
-    /// [`AllocationType::Unknown`] otherwise you will get panics. It does not matter what this is
-    /// when using the [`BumpAllocator`].
-    ///
-    /// The default value is [`AllocationType::Unknown`].
-    pub allocation_type: AllocationType,
-
     /// Whether the allocator should use the dedicated allocation APIs.
/// /// This means that when the allocator decides that an allocation should not be suballocated, /// but rather have its own block of [`DeviceMemory`], that that allocation will be made a - /// dedicated allocation. Otherwise they are still made free-standing ([root]) allocations, - /// just not [dedicated] ones. + /// dedicated allocation. Otherwise they are still given their own block of device memory, just + /// that that block won't be [dedicated] to a particular resource. /// /// Dedicated allocations are an optimization which may result in better performance, so there /// really is no reason to disable this option, unless the restrictions that they bring with /// them are a problem. Namely, a dedicated allocation must only be used for the resource it - /// was created for. Meaning that [reusing the memory] for something else is not possible, - /// [suballocating it] is not possible, and [aliasing it] is also not possible. + /// was created for. Meaning that reusing the memory for something else is not possible, + /// suballocating it is not possible, and aliasing it is also not possible. /// /// This option is silently ignored (treated as `false`) if the device API version is below 1.1 /// and the [`khr_dedicated_allocation`] extension is not enabled on the device. /// /// The default value is `true`. /// - /// [root]: MemoryAlloc::is_root - /// [dedicated]: MemoryAlloc::is_dedicated - /// [reusing the memory]: MemoryAlloc::try_unwrap - /// [suballocating it]: Suballocator - /// [aliasing it]: MemoryAlloc::alias + /// [dedicated]: DeviceMemory::is_dedicated /// [`khr_dedicated_allocation`]: crate::device::DeviceExtensions::khr_dedicated_allocation pub dedicated_allocation: bool, @@ -1592,7 +1723,6 @@ impl GenericMemoryAllocatorCreateInfo<'_, '_> { let &Self { block_sizes, memory_type_bits: _, - allocation_type: _, dedicated_allocation: _, export_handle_types, device_address: _, @@ -1651,7 +1781,6 @@ impl Default for GenericMemoryAllocatorCreateInfo<'_, '_> { GenericMemoryAllocatorCreateInfo { block_sizes: &[], memory_type_bits: u32::MAX, - allocation_type: AllocationType::Unknown, dedicated_allocation: true, export_handle_types: &[], device_address: true, diff --git a/vulkano/src/memory/allocator/suballocator.rs b/vulkano/src/memory/allocator/suballocator.rs index 7a3b183d..bb922ba4 100644 --- a/vulkano/src/memory/allocator/suballocator.rs +++ b/vulkano/src/memory/allocator/suballocator.rs @@ -14,518 +14,29 @@ //! [the parent module]: super use self::host::SlotId; -use super::{align_down, align_up, array_vec::ArrayVec, DeviceAlignment, DeviceLayout}; -use crate::{ - device::{Device, DeviceOwned}, - image::ImageTiling, - memory::{self, is_aligned, DeviceMemory, MappedMemoryRange, MemoryPropertyFlags}, - sync::HostAccessError, - DeviceSize, NonZeroDeviceSize, Validated, ValidationError, VulkanError, +use super::{ + align_down, align_up, array_vec::ArrayVec, AllocationHandle, DeviceAlignment, DeviceLayout, }; -use crossbeam_queue::ArrayQueue; +use crate::{image::ImageTiling, memory::is_aligned, DeviceSize, NonZeroDeviceSize}; use parking_lot::Mutex; use std::{ cell::Cell, cmp, error::Error, fmt::{self, Display}, - mem::{self, ManuallyDrop}, - ops::RangeBounds, - ptr::{self, NonNull}, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, + ptr, + sync::atomic::{AtomicU64, Ordering}, }; -/// Memory allocations are portions of memory that are reserved for a specific resource or purpose. -/// -/// There's a few ways you can obtain a `MemoryAlloc` in Vulkano. 
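For contrast with the removal that follows: the old `MemoryAlloc` below was an RAII type that freed itself in `Drop`, whereas under the new API a `MemoryAlloc` is plain data that must be handed back explicitly. A hedged sketch (it is assumed the allocation came from this allocator and nothing still uses it):

```rust
# use std::sync::Arc;
# use vulkano::memory::allocator::{MemoryAlloc, MemoryAllocator};
# let allocator: Arc<dyn MemoryAllocator> = return;
# let allocation: MemoryAlloc = return;
// Dropping `allocation` would now merely leak its slot in the block; freeing is explicit.
// SAFETY (assumed): `allocation` is a currently allocated allocation of `allocator`.
unsafe { allocator.deallocate(allocation) };
```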
Most commonly you will probably -/// want to use a [memory allocator]. If you already have a [`DeviceMemory`] block on hand that you -/// would like to turn into an allocation, you can use [the constructor]. Lastly, you can use a -/// [suballocator] if you want to create multiple smaller allocations out of a bigger one. -/// -/// [memory allocator]: super::MemoryAllocator -/// [the constructor]: Self::new -/// [suballocator]: Suballocator -#[derive(Debug)] -pub struct MemoryAlloc { - offset: DeviceSize, - size: DeviceSize, - // Needed when binding resources to the allocation in order to avoid aliasing memory. - allocation_type: AllocationType, - // Used by the suballocators to align allocations to the non-coherent atom size when the memory - // type is host-visible but not host-coherent. This will be `None` for any other memory type. - atom_size: Option, - // Used in the `Drop` impl to free the allocation if required. - parent: AllocParent, -} - -#[derive(Debug)] -enum AllocParent { - FreeList { - allocator: Arc, - id: SlotId, - }, - Buddy { - allocator: Arc, - order: usize, - offset: DeviceSize, - }, - Pool { - allocator: Arc, - index: DeviceSize, - }, - Bump(Arc), - Root(Arc), - Dedicated(DeviceMemory), -} - -impl MemoryAlloc { - /// Creates a new `MemoryAlloc`. - #[inline] - pub fn new(device_memory: DeviceMemory) -> Self { - // Sanity check: this would lead to UB when suballocating. - assert!(device_memory.allocation_size() <= DeviceLayout::MAX_SIZE); - - let device = device_memory.device(); - let physical_device = device.physical_device(); - let memory_type_index = device_memory.memory_type_index(); - let property_flags = &physical_device.memory_properties().memory_types - [memory_type_index as usize] - .property_flags; - - let atom_size = (property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE) - && !property_flags.intersects(MemoryPropertyFlags::HOST_COHERENT)) - .then_some(physical_device.properties().non_coherent_atom_size); - - MemoryAlloc { - offset: 0, - size: device_memory.allocation_size(), - allocation_type: AllocationType::Unknown, - atom_size, - parent: if device_memory.is_dedicated() { - AllocParent::Dedicated(device_memory) - } else { - AllocParent::Root(Arc::new(device_memory)) - }, - } - } - - /// Returns the offset of the allocation within the [`DeviceMemory`] block. - #[inline] - pub fn offset(&self) -> DeviceSize { - self.offset - } - - /// Returns the size of the allocation. - #[inline] - pub fn size(&self) -> DeviceSize { - self.size - } - - /// Returns the type of resources that can be bound to this allocation. - #[inline] - pub fn allocation_type(&self) -> AllocationType { - self.allocation_type - } - - /// Returns the mapped pointer to a range of the allocation, or returns [`None`] if ouf of - /// bounds. - /// - /// `range` is specified in bytes relative to the beginning of `self` and must fall within the - /// range of the memory mapping given to [`DeviceMemory::map`]. - /// - /// See [`MappingState::slice`] for the safety invariants of the returned pointer. 
- /// - /// [`MappingState::slice`]: crate::memory::MappingState::slice - #[inline] - pub fn mapped_slice( - &self, - range: impl RangeBounds, - ) -> Option, HostAccessError>> { - let mut range = memory::range(range, ..self.size())?; - range.start += self.offset(); - range.end += self.offset(); - - let res = if let Some(state) = self.device_memory().mapping_state() { - state.slice(range).ok_or(HostAccessError::OutOfMappedRange) - } else { - Err(HostAccessError::NotHostMapped) - }; - - Some(res) - } - - #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] - #[inline] - pub unsafe fn mapped_slice_unchecked( - &self, - range: impl RangeBounds, - ) -> Result, HostAccessError> { - let mut range = memory::range_unchecked(range, ..self.size()); - range.start += self.offset(); - range.end += self.offset(); - - if let Some(state) = self.device_memory().mapping_state() { - state.slice(range).ok_or(HostAccessError::OutOfMappedRange) - } else { - Err(HostAccessError::NotHostMapped) - } - } - - pub(crate) fn atom_size(&self) -> Option { - self.atom_size - } - - /// Invalidates the host (CPU) cache for a range of the allocation. - /// - /// If the device memory is not [host-coherent], you must call this function before the memory - /// is read by the host, if the device previously wrote to the memory. It has no effect if the - /// memory is host-coherent. - /// - /// # Safety - /// - /// - If there are memory writes by the device that have not been propagated into the host - /// cache, then there must not be any references in Rust code to any portion of the specified - /// `memory_range`. - /// - /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT - /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size - #[inline] - pub unsafe fn invalidate_range( - &self, - memory_range: MappedMemoryRange, - ) -> Result<(), Validated> { - self.validate_memory_range(&memory_range)?; - - self.device_memory() - .invalidate_range(self.create_memory_range(memory_range)) - } - - #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] - #[inline] - pub unsafe fn invalidate_range_unchecked( - &self, - memory_range: MappedMemoryRange, - ) -> Result<(), VulkanError> { - self.device_memory() - .invalidate_range_unchecked(self.create_memory_range(memory_range)) - } - - /// Flushes the host cache for a range of the allocation. - /// - /// If the device memory is not [host-coherent], you must call this function after writing to - /// the memory, if the device is going to read the memory. It has no effect if the memory is - /// host-coherent. - /// - /// # Safety - /// - /// - There must be no operations pending or executing in a device queue, that access the - /// specified `memory_range`. 
- /// - /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT - /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size - #[inline] - pub unsafe fn flush_range( - &self, - memory_range: MappedMemoryRange, - ) -> Result<(), Validated> { - self.validate_memory_range(&memory_range)?; - - self.device_memory() - .flush_range(self.create_memory_range(memory_range)) - } - - #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))] - #[inline] - pub unsafe fn flush_range_unchecked( - &self, - memory_range: MappedMemoryRange, - ) -> Result<(), VulkanError> { - self.device_memory() - .flush_range_unchecked(self.create_memory_range(memory_range)) - } - - fn validate_memory_range( - &self, - memory_range: &MappedMemoryRange, - ) -> Result<(), Box> { - let &MappedMemoryRange { - offset, - size, - _ne: _, - } = memory_range; - - if !(offset <= self.size() && size <= self.size() - offset) { - return Err(Box::new(ValidationError { - context: "memory_range".into(), - problem: "is not contained within the allocation".into(), - ..Default::default() - })); - } - - Ok(()) - } - - fn create_memory_range(&self, memory_range: MappedMemoryRange) -> MappedMemoryRange { - let MappedMemoryRange { - mut offset, - mut size, - _ne: _, - } = memory_range; - - let memory = self.device_memory(); - - offset += self.offset(); - - // VUID-VkMappedMemoryRange-size-01390 - if memory_range.offset + size == self.size() { - // We can align the end of the range like this without aliasing other allocations, - // because the suballocators ensure that all allocations are aligned to the atom size - // for non-host-coherent host-visible memory. - let end = cmp::min( - align_up(offset + size, memory.atom_size()), - memory.allocation_size(), - ); - size = end - offset; - } - - MappedMemoryRange { - offset, - size, - _ne: crate::NonExhaustive(()), - } - } - - /// Returns the underlying block of [`DeviceMemory`]. - #[inline] - pub fn device_memory(&self) -> &DeviceMemory { - match &self.parent { - AllocParent::FreeList { allocator, .. } => &allocator.device_memory, - AllocParent::Buddy { allocator, .. } => &allocator.device_memory, - AllocParent::Pool { allocator, .. } => &allocator.device_memory, - AllocParent::Bump(allocator) => &allocator.device_memory, - AllocParent::Root(device_memory) => device_memory, - AllocParent::Dedicated(device_memory) => device_memory, - } - } - - /// Returns the parent allocation if this allocation is a [suballocation], otherwise returns - /// [`None`]. - /// - /// [suballocation]: Suballocator - #[inline] - pub fn parent_allocation(&self) -> Option<&Self> { - match &self.parent { - AllocParent::FreeList { allocator, .. } => Some(&allocator.region), - AllocParent::Buddy { allocator, .. } => Some(&allocator.region), - AllocParent::Pool { allocator, .. } => Some(&allocator.region), - AllocParent::Bump(allocator) => Some(&allocator.region), - AllocParent::Root(_) => None, - AllocParent::Dedicated(_) => None, - } - } - - /// Returns `true` if this allocation is the root of the [memory hierarchy]. - /// - /// [memory hierarchy]: Suballocator#memory-hierarchies - #[inline] - pub fn is_root(&self) -> bool { - matches!(&self.parent, AllocParent::Root(_)) - } - - /// Returns `true` if this allocation is a [dedicated allocation]. 
- /// - /// [dedicated allocation]: crate::memory::MemoryAllocateInfo#structfield.dedicated_allocation - #[inline] - pub fn is_dedicated(&self) -> bool { - matches!(&self.parent, AllocParent::Dedicated(_)) - } - - /// Returns the underlying block of [`DeviceMemory`] if this allocation [is the root - /// allocation] and is not [aliased], otherwise returns the allocation back wrapped in [`Err`]. - /// - /// [is the root allocation]: Self::is_root - /// [aliased]: Self::alias - #[inline] - pub fn try_unwrap(self) -> Result { - let this = ManuallyDrop::new(self); - - // SAFETY: This is safe because even if a panic happens, `self.parent` can not be - // double-freed since `self` was wrapped in `ManuallyDrop`. If we fail to unwrap the - // `DeviceMemory`, the copy of `self.parent` is forgotten and only then is the - // `ManuallyDrop` wrapper removed from `self`. - match unsafe { ptr::read(&this.parent) } { - AllocParent::Root(device_memory) => { - Arc::try_unwrap(device_memory).map_err(|device_memory| { - mem::forget(device_memory); - ManuallyDrop::into_inner(this) - }) - } - parent => { - mem::forget(parent); - Err(ManuallyDrop::into_inner(this)) - } - } - } - - /// Duplicates the allocation, creating aliased memory. Returns [`None`] if the allocation [is - /// a dedicated allocation]. - /// - /// You might consider using this method if you want to optimize memory usage by aliasing - /// render targets for example, in which case you will have to double and triple check that the - /// memory is not used concurrently unless it only involves reading. You are highly discouraged - /// from doing this unless you have a reason to. - /// - /// # Safety - /// - /// - You must ensure memory accesses are synchronized yourself. - /// - /// [memory hierarchy]: Suballocator#memory-hierarchies - /// [is a dedicated allocation]: Self::is_dedicated - #[inline] - pub unsafe fn alias(&self) -> Option { - self.root().map(|device_memory| MemoryAlloc { - parent: AllocParent::Root(device_memory.clone()), - ..*self - }) - } - - fn root(&self) -> Option<&Arc> { - match &self.parent { - AllocParent::FreeList { allocator, .. } => Some(&allocator.device_memory), - AllocParent::Buddy { allocator, .. } => Some(&allocator.device_memory), - AllocParent::Pool { allocator, .. } => Some(&allocator.device_memory), - AllocParent::Bump(allocator) => Some(&allocator.device_memory), - AllocParent::Root(device_memory) => Some(device_memory), - AllocParent::Dedicated(_) => None, - } - } - - /// Increases the offset of the allocation by the specified `amount` and shrinks its size by - /// the same amount. - /// - /// # Panics - /// - /// - Panics if the `amount` exceeds the size of the allocation. - #[inline] - pub fn shift(&mut self, amount: DeviceSize) { - assert!(amount <= self.size); - - unsafe { self.set_offset(self.offset + amount) }; - self.size -= amount; - } - - /// Shrinks the size of the allocation to the specified `new_size`. - /// - /// # Panics - /// - /// - Panics if the `new_size` exceeds the current size of the allocation. - #[inline] - pub fn shrink(&mut self, new_size: DeviceSize) { - assert!(new_size <= self.size); - - self.size = new_size; - } - - /// Sets the offset of the allocation without checking for memory aliasing. - /// - /// See also [`shift`], which moves the offset safely. - /// - /// # Safety - /// - /// - You must ensure that the allocation doesn't alias any other allocations within the - /// [`DeviceMemory`] block, and if it does, then you must ensure memory accesses are - /// synchronized yourself. 
- /// - You must ensure the allocation still fits inside the `DeviceMemory` block. - /// - /// [`shift`]: Self::shift - #[inline] - pub unsafe fn set_offset(&mut self, new_offset: DeviceSize) { - self.offset = new_offset; - } - - /// Sets the size of the allocation without checking for memory aliasing. - /// - /// See also [`shrink`], which sets the size safely. - /// - /// # Safety - /// - /// - You must ensure that the allocation doesn't alias any other allocations within the - /// [`DeviceMemory`] block, and if it does, then you must ensure memory accesses are - /// synchronized yourself. - /// - You must ensure the allocation still fits inside the `DeviceMemory` block. - /// - /// [`shrink`]: Self::shrink - #[inline] - pub unsafe fn set_size(&mut self, new_size: DeviceSize) { - self.size = new_size; - } - - /// Sets the allocation type. - /// - /// This might cause memory aliasing due to [buffer-image granularity] conflicts if the - /// allocation type is [`Linear`] or [`NonLinear`] and is changed to a different one. - /// - /// # Safety - /// - /// - You must ensure that the allocation doesn't alias any other allocations within the - /// [`DeviceMemory`] block, and if it does, then you must ensure memory accesses are - /// synchronized yourself. - /// - /// [buffer-image granularity]: super#buffer-image-granularity - /// [`Linear`]: AllocationType::Linear - /// [`NonLinear`]: AllocationType::NonLinear - #[inline] - pub unsafe fn set_allocation_type(&mut self, new_type: AllocationType) { - self.allocation_type = new_type; - } -} - -impl Drop for MemoryAlloc { - #[inline] - fn drop(&mut self) { - match &self.parent { - AllocParent::FreeList { allocator, id } => { - unsafe { allocator.free(*id) }; - } - AllocParent::Buddy { - allocator, - order, - offset, - } => { - unsafe { allocator.free(*order, *offset) }; - } - AllocParent::Pool { allocator, index } => { - unsafe { allocator.free(*index) }; - } - // The bump allocator can't free individually, but we need to keep a reference to it so - // it don't get reset or dropped while in use. - AllocParent::Bump(_) => {} - // A root allocation frees itself once all references to the `DeviceMemory` are dropped. - AllocParent::Root(_) => {} - // Dedicated allocations free themselves when the `DeviceMemory` is dropped. - AllocParent::Dedicated(_) => {} - } - } -} - -unsafe impl DeviceOwned for MemoryAlloc { - #[inline] - fn device(&self) -> &Arc { - self.device_memory().device() - } -} - /// Suballocators are used to divide a *region* into smaller *suballocations*. /// /// # Regions /// /// As the name implies, a region is a contiguous portion of memory. It may be the whole dedicated -/// block of [`DeviceMemory`], or only a part of it. Regions are just [allocations] like any other, -/// but we use this term to refer specifically to an allocation that is to be suballocated. Every -/// suballocator is created with a region to work with. +/// block of [`DeviceMemory`], or only a part of it. Or it may be a buffer, or only a part of a +/// buffer. Regions are just allocations like any other, but we use this term to refer specifically +/// to an allocation that is to be suballocated. Every suballocator is created with a region to +/// work with. /// /// # Free-lists /// @@ -539,7 +50,7 @@ unsafe impl DeviceOwned for MemoryAlloc { /// Different applications have wildly different allocation needs, and there's no way to cover them /// all with a single type of allocator. 
Furthermore, different allocators have different /// trade-offs and are best suited to specific tasks. To account for all possible use-cases, -/// Vulkano offers the ability to create *memory hierarchies*. We refer to the [`DeviceMemory`] as +/// Vulkano offers the ability to create *memory hierarchies*. We refer to the `DeviceMemory` as /// the root of any such hierarchy, even though technically the driver has levels that are further /// up, because those `DeviceMemory` blocks need to be allocated from physical memory [pages] /// themselves, but since those levels are not accessible to us we don't need to consider them. You @@ -547,60 +58,17 @@ unsafe impl DeviceOwned for MemoryAlloc { /// memory within a `DeviceMemory` block. You can suballocate the root into regions, which are then /// suballocated into further regions and so on, creating hierarchies of arbitrary height. /// -/// As an added bonus, memory hierarchies lend themselves perfectly to the concept of composability -/// we all love so much, making them a natural fit for Rust. For one, a region can be allocated any -/// way, and fed into any suballocator. Also, once you are done with a branch of a hierarchy, -/// meaning there are no more suballocations in use within the region of that branch, and you would -/// like to reuse the region, you can do so safely! All suballocators have a `try_into_region` -/// method for this purpose. This means that you can replace one suballocator with another without -/// consulting any of the higher levels in the hierarchy. -/// /// # Examples /// -/// Allocating a region to suballocatate: -/// -/// ``` -/// use vulkano::memory::{DeviceMemory, MemoryAllocateInfo, MemoryPropertyFlags, MemoryType}; -/// use vulkano::memory::allocator::MemoryAlloc; -/// # let device: std::sync::Arc = return; -/// -/// // First you need to find a suitable memory type. -/// let memory_type_index = device -/// .physical_device() -/// .memory_properties() -/// .memory_types -/// .iter() -/// .enumerate() -/// // In a real-world scenario, you would probably want to rank the memory types based on your -/// // requirements, instead of picking the first one that satisfies them. Also, you have to -/// // take the requirements of the resources you want to allocate memory for into consideration. -/// .find_map(|(index, MemoryType { property_flags, .. })| { -/// property_flags.intersects(MemoryPropertyFlags::DEVICE_LOCAL).then_some(index) -/// }) -/// .unwrap() as u32; -/// -/// let region = MemoryAlloc::new( -/// DeviceMemory::allocate( -/// device.clone(), -/// MemoryAllocateInfo { -/// allocation_size: 64 * 1024 * 1024, -/// memory_type_index, -/// ..Default::default() -/// }, -/// ) -/// .unwrap(), -/// ); -/// -/// // You can now feed `region` into any suballocator. -/// ``` +/// TODO /// /// # Implementing the trait /// -/// Please don't. +/// TODO /// -/// [allocations]: MemoryAlloc +/// [`DeviceMemory`]: crate::memory::DeviceMemory /// [pages]: super#pages -pub unsafe trait Suballocator: DeviceOwned { +pub unsafe trait Suballocator { /// Whether this allocator needs to block or not. /// /// This is used by the [`GenericMemoryAllocator`] to specialize the allocation strategy to the @@ -620,32 +88,59 @@ pub unsafe trait Suballocator: DeviceOwned { /// Creates a new suballocator for the given [region]. /// + /// # Arguments + /// + /// - `region_offset` - The offset where the region begins. + /// + /// - `region_size` - The size of the region. 
+ /// /// [region]: Self#regions - fn new(region: MemoryAlloc) -> Self + fn new(region_offset: DeviceSize, region_size: DeviceSize) -> Self where Self: Sized; /// Creates a new suballocation within the [region]. /// + /// # Arguments + /// + /// - `layout` - The layout of the allocation. + /// + /// - `allocation_type` - The type of resources that can be bound to the allocation. + /// + /// - `buffer_image_granularity` - The [buffer-image granularity] device property. + /// + /// This is provided as an argument here rather than on construction of the allocator to + /// allow for optimizations: if you are only ever going to be creating allocations with the + /// same `allocation_type` using this allocator, then you may hard-code this to + /// [`DeviceAlignment::MIN`], in which case, after inlining, the logic for aligning the + /// allocation to the buffer-image-granularity based on the allocation type of surrounding + /// allocations can be optimized out. + /// + /// You don't need to consider the buffer-image granularity for instance when suballocating a + /// buffer, or when suballocating a [`DeviceMemory`] block that's only ever going to be used + /// for optimal images. However, if you do allocate both linear and non-linear resources and + /// don't specify the buffer-image granularity device property here, **you will get undefined + /// behavior down the line**. Note that [`AllocationType::Unknown`] counts as both linear and + /// non-linear at the same time: if you always use this as the `allocation_type` using this + /// allocator, then it is valid to set this to `DeviceAlignment::MIN`, but **you must ensure + /// all allocations are aligned to the buffer-image granularity at minimum**. + /// /// [region]: Self#regions + /// [buffer-image granularity]: super#buffer-image-granularity + /// [`DeviceMemory`]: crate::memory::DeviceMemory fn allocate( &self, - create_info: SuballocationCreateInfo, - ) -> Result; + layout: DeviceLayout, + allocation_type: AllocationType, + buffer_image_granularity: DeviceAlignment, + ) -> Result; - /// Returns a reference to the underlying [region]. + /// Deallocates the given `suballocation`. /// - /// [region]: Self#regions - fn region(&self) -> &MemoryAlloc; - - /// Returns the underlying [region] if there are no other strong references to the allocator, - /// otherwise hands you back the allocator wrapped in [`Err`]. Allocations made with the - /// allocator count as references for as long as they are alive. + /// # Safety /// - /// [region]: Self#regions - fn try_into_region(self) -> Result - where - Self: Sized; + /// - `suballocation` must refer to a **currently allocated** suballocation of `self`. + unsafe fn deallocate(&self, suballocation: Suballocation); /// Returns the total amount of free space that is left in the [region]. /// @@ -656,51 +151,15 @@ pub unsafe trait Suballocator: DeviceOwned { fn cleanup(&mut self); } -/// Parameters to create a new [allocation] using a [suballocator]. -/// -/// [allocation]: MemoryAlloc -/// [suballocator]: Suballocator -#[derive(Clone, Debug)] -pub struct SuballocationCreateInfo { - /// Memory layout required for the allocation. - /// - /// The default value is a layout with size [`DeviceLayout::MAX_SIZE`] and alignment - /// [`DeviceAlignment::MIN`], which must be overridden. - pub layout: DeviceLayout, - - /// Type of resources that can be bound to the allocation. - /// - /// The default value is [`AllocationType::Unknown`]. 
- pub allocation_type: AllocationType, - - pub _ne: crate::NonExhaustive, -} - -impl Default for SuballocationCreateInfo { - #[inline] - fn default() -> Self { - SuballocationCreateInfo { - layout: DeviceLayout::new( - NonZeroDeviceSize::new(DeviceLayout::MAX_SIZE).unwrap(), - DeviceAlignment::MIN, - ) - .unwrap(), - allocation_type: AllocationType::Unknown, - _ne: crate::NonExhaustive(()), - } - } -} - /// Tells the [suballocator] what type of resource will be bound to the allocation, so that it can /// optimize memory usage while still respecting the [buffer-image granularity]. /// /// [suballocator]: Suballocator /// [buffer-image granularity]: super#buffer-image-granularity #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -#[non_exhaustive] pub enum AllocationType { - /// The type of resource is unknown, it might be either linear or non-linear. What this means is - /// that allocations created with this type must always be aligned to the buffer-image + /// The type of resource is unknown, it might be either linear or non-linear. What this means + /// is that allocations created with this type must always be aligned to the buffer-image /// granularity. Unknown = 0, @@ -724,9 +183,29 @@ impl From for AllocationType { } } -/// Error that can be returned when using a [suballocator]. +/// An allocation made using a [suballocator]. /// /// [suballocator]: Suballocator +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] +pub struct Suballocation { + /// The **absolute** offset within the [region]. That means that this is already offset by the + /// region's offset, **not relative to beginning of the region**. This offset will be aligned + /// to the requested alignment. + /// + /// [region]: Suballocator#regions + pub offset: DeviceSize, + + /// The size of the allocation. This will be exactly equal to the requested size. + pub size: DeviceSize, + + /// An opaque handle identifying the allocation within the allocator. + pub handle: AllocationHandle, +} + +/// Error that can be returned when creating an [allocation] using a [suballocator]. +/// +/// [allocation]: Suballocation +/// [suballocator]: Suballocator #[derive(Clone, Debug, PartialEq, Eq)] pub enum SuballocatorError { /// There is no more space available in the region. @@ -734,32 +213,18 @@ pub enum SuballocatorError { /// The region has enough free space to satisfy the request but is too fragmented. FragmentedRegion, - - /// The allocation was larger than the allocator's block size, meaning that this error would - /// arise with the parameters no matter the state the allocator was in. - /// - /// This can be used to let the [`GenericMemoryAllocator`] know that allocating a new block of - /// [`DeviceMemory`] and trying to suballocate it with the same parameters would not solve the - /// issue. - /// - /// [`GenericMemoryAllocator`]: super::GenericMemoryAllocator - BlockSizeExceeded, } impl Error for SuballocatorError {} impl Display for SuballocatorError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}", - match self { - Self::OutOfRegionMemory => "out of region memory", - Self::FragmentedRegion => "the region is too fragmented", - Self::BlockSizeExceeded => - "the allocation size was greater than the suballocator's block size", - } - ) + let msg = match self { + Self::OutOfRegionMemory => "out of region memory", + Self::FragmentedRegion => "the region is too fragmented", + }; + + f.write_str(msg) } } @@ -771,7 +236,7 @@ impl Display for SuballocatorError { /// are made. 
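Tying the pieces above together, a round trip through `allocate` and `deallocate` looks roughly as follows. Passing `DeviceAlignment::MIN` as the buffer-image granularity is only valid here because every allocation uses the same allocation type, per the caveat documented on `allocate`; treat this as a sketch, not a template.

```rust
use vulkano::memory::allocator::{
    AllocationType, DeviceAlignment, DeviceLayout, FreeListAllocator, Suballocator,
};

let allocator = FreeListAllocator::new(0, 1024 * 1024);
let layout = DeviceLayout::from_size_alignment(256, 64).unwrap();

let suballocation = allocator
    .allocate(layout, AllocationType::Linear, DeviceAlignment::MIN)
    .unwrap();

// The offset is absolute and respects the requested alignment; the size is
// exactly what was asked for.
assert_eq!(suballocation.offset % 64, 0);
assert_eq!(suballocation.size, 256);

// SAFETY: `suballocation` was just allocated from `allocator` and is still live.
unsafe { allocator.deallocate(suballocation) };
```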
Therefore, this allocator is best suited for long-lived allocations. If you need /// to create allocations of various sizes, but can't afford this fragmentation, then the /// [`BuddyAllocator`] is your best buddy. If you need to create allocations which share a similar -/// size, consider the [`PoolAllocator`]. Lastly, if you need to allocate very often, then +/// size, consider an allocation pool. Lastly, if you need to allocate very often, then /// [`BumpAllocator`] is best suited. /// /// See also [the `Suballocator` implementation]. @@ -803,115 +268,7 @@ impl Display for SuballocatorError { /// would be *O*(*n*). However, this scenario is extremely unlikely which is why we are not /// considering it in the above analysis. Additionally, if your free-list is filled with /// allocations that all have the same size then that seems pretty sus. Sounds like you're in dire -/// need of a `PoolAllocator`. -/// -/// # Examples -/// -/// Most commonly you will not want to use this suballocator directly but rather use it within -/// [`GenericMemoryAllocator`], having one global [`StandardMemoryAllocator`] for most if not all -/// of your allocation needs. -/// -/// Basic usage as a global allocator for long-lived resources: -/// -/// ``` -/// use vulkano::{ -/// format::Format, -/// image::{Image, ImageCreateInfo, ImageType, ImageUsage}, -/// memory::allocator::{AllocationCreateInfo, StandardMemoryAllocator}, -/// }; -/// -/// # let device: std::sync::Arc = return; -/// # -/// let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); -/// -/// # fn read_textures() -> Vec> { Vec::new() } -/// # -/// // Allocate some resources. -/// let textures_data: Vec> = read_textures(); -/// let textures = textures_data.into_iter().map(|data| { -/// let image = Image::new( -/// &memory_allocator, -/// ImageCreateInfo { -/// image_type: ImageType::Dim2d, -/// format: Format::R8G8B8A8_UNORM, -/// extent: [1024, 1024, 1], -/// usage: ImageUsage::SAMPLED, -/// ..Default::default() -/// }, -/// AllocationCreateInfo::default(), -/// ) -/// .unwrap(); -/// -/// // ...upload data... -/// -/// image -/// }); -/// ``` -/// -/// For use in allocating arenas for [`SubbufferAllocator`]: -/// -/// ``` -/// use std::sync::Arc; -/// use vulkano::{ -/// buffer::{ -/// allocator::{SubbufferAllocator, SubbufferAllocatorCreateInfo}, -/// BufferUsage, -/// }, -/// memory::allocator::{MemoryTypeFilter, StandardMemoryAllocator}, -/// }; -/// -/// # let device: std::sync::Arc = return; -/// # -/// // We need to wrap the allocator in an `Arc` so that we can share ownership of it. -/// let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); -/// let buffer_allocator = SubbufferAllocator::new( -/// memory_allocator.clone(), -/// SubbufferAllocatorCreateInfo { -/// buffer_usage: BufferUsage::TRANSFER_SRC, -/// memory_type_filter: MemoryTypeFilter::PREFER_HOST -/// | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE, -/// ..Default::default() -/// }, -/// ); -/// -/// // You can continue using `memory_allocator` for other things. -/// ``` -/// -/// Sometimes, it is neccessary to suballocate an allocation. 
If you don't want to allocate new -/// [`DeviceMemory`] blocks to suballocate, perhaps because of concerns of memory wastage or -/// allocation efficiency, you can use your existing global `StandardMemoryAllocator` to allocate -/// regions for your suballocation needs: -/// -/// ``` -/// use vulkano::memory::allocator::{ -/// DeviceLayout, MemoryAllocator, StandardMemoryAllocator, SuballocationCreateInfo, -/// }; -/// -/// # let device: std::sync::Arc = return; -/// let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); -/// -/// # let memory_type_index = 0; -/// let region = memory_allocator.allocate_from_type( -/// // When choosing the index, you have to make sure that the memory type is allowed for the -/// // type of resource that you want to bind the suballocations to. -/// memory_type_index, -/// SuballocationCreateInfo { -/// layout: DeviceLayout::from_size_alignment( -/// // This will be the size of your region. -/// 16 * 1024 * 1024, -/// // It generally does not matter what the alignment is, because you're going to -/// // suballocate the allocation anyway, and not bind it directly. -/// 1, -/// ) -/// .unwrap(), -/// ..Default::default() -/// }, -/// false, -/// ) -/// .unwrap(); -/// -/// // You can now feed the `region` into any suballocator. -/// ``` +/// need of an allocation pool. /// /// [suballocator]: Suballocator /// [free-list]: Suballocator#free-lists @@ -919,122 +276,55 @@ impl Display for SuballocatorError { /// [the `Suballocator` implementation]: Suballocator#impl-Suballocator-for-Arc /// [internal fragmentation]: super#internal-fragmentation /// [alignment requirements]: super#alignment -/// [`GenericMemoryAllocator`]: super::GenericMemoryAllocator -/// [`StandardMemoryAllocator`]: super::StandardMemoryAllocator -/// [`SubbufferAllocator`]: crate::buffer::allocator::SubbufferAllocator #[derive(Debug)] pub struct FreeListAllocator { - region: MemoryAlloc, - device_memory: Arc, - buffer_image_granularity: DeviceAlignment, - atom_size: DeviceAlignment, + region_offset: DeviceSize, // Total memory remaining in the region. free_size: AtomicU64, state: Mutex, } -impl FreeListAllocator { +unsafe impl Suballocator for FreeListAllocator { + const IS_BLOCKING: bool = true; + + const NEEDS_CLEANUP: bool = false; + /// Creates a new `FreeListAllocator` for the given [region]. /// - /// # Panics - /// - /// - Panics if `region.allocation_type` is not [`AllocationType::Unknown`]. This is done to - /// avoid checking for a special case of [buffer-image granularity] conflict. - /// - Panics if `region` is a [dedicated allocation]. 
- /// /// [region]: Suballocator#regions - /// [buffer-image granularity]: super#buffer-image-granularity - /// [dedicated allocation]: MemoryAlloc::is_dedicated - pub fn new(region: MemoryAlloc) -> Arc { + fn new(region_offset: DeviceSize, region_size: DeviceSize) -> Self { // NOTE(Marc): This number was pulled straight out of my a- const AVERAGE_ALLOCATION_SIZE: DeviceSize = 64 * 1024; - assert!(region.allocation_type == AllocationType::Unknown); + let free_size = AtomicU64::new(region_size); - let device_memory = region - .root() - .expect("dedicated allocations can't be suballocated") - .clone(); - let buffer_image_granularity = device_memory - .device() - .physical_device() - .properties() - .buffer_image_granularity; - - let atom_size = region.atom_size.unwrap_or(DeviceAlignment::MIN); - let free_size = AtomicU64::new(region.size); - - let capacity = (region.size / AVERAGE_ALLOCATION_SIZE) as usize; + let capacity = (region_size / AVERAGE_ALLOCATION_SIZE) as usize; let mut nodes = host::PoolAllocator::new(capacity + 64); let mut free_list = Vec::with_capacity(capacity / 16 + 16); let root_id = nodes.allocate(SuballocationListNode { prev: None, next: None, - offset: region.offset, - size: region.size, + offset: region_offset, + size: region_size, ty: SuballocationType::Free, }); free_list.push(root_id); let state = Mutex::new(FreeListAllocatorState { nodes, free_list }); - Arc::new(FreeListAllocator { - region, - device_memory, - buffer_image_granularity, - atom_size, + FreeListAllocator { + region_offset, free_size, state, - }) + } } - /// # Safety - /// - /// - `node_id` must refer to an occupied suballocation allocated by `self`. - unsafe fn free(&self, node_id: SlotId) { - let mut state = self.state.lock(); - let node = state.nodes.get_mut(node_id); - - debug_assert!(node.ty != SuballocationType::Free); - - // Suballocation sizes are constrained by the size of the region, so they can't possibly - // overflow when added up. - self.free_size.fetch_add(node.size, Ordering::Release); - - node.ty = SuballocationType::Free; - state.coalesce(node_id); - state.free(node_id); - } -} - -unsafe impl Suballocator for Arc { - const IS_BLOCKING: bool = true; - - const NEEDS_CLEANUP: bool = false; - - #[inline] - fn new(region: MemoryAlloc) -> Self { - FreeListAllocator::new(region) - } - - /// Creates a new suballocation within the [region]. - /// - /// # Errors - /// - /// - Returns [`OutOfRegionMemory`] if there are no free suballocations large enough so satisfy - /// the request. - /// - Returns [`FragmentedRegion`] if a suballocation large enough to satisfy the request could - /// have been formed, but wasn't because of [external fragmentation]. 
- /// - /// [region]: Suballocator#regions - /// [`allocate`]: Suballocator::allocate - /// [`OutOfRegionMemory`]: SuballocatorError::OutOfRegionMemory - /// [`FragmentedRegion`]: SuballocatorError::FragmentedRegion - /// [external fragmentation]: super#external-fragmentation #[inline] fn allocate( &self, - create_info: SuballocationCreateInfo, - ) -> Result { + layout: DeviceLayout, + allocation_type: AllocationType, + buffer_image_granularity: DeviceAlignment, + ) -> Result { fn has_granularity_conflict(prev_ty: SuballocationType, ty: AllocationType) -> bool { if prev_ty == SuballocationType::Free { false @@ -1045,14 +335,8 @@ unsafe impl Suballocator for Arc { } } - let SuballocationCreateInfo { - layout, - allocation_type, - _ne: _, - } = create_info; - let size = layout.size(); - let alignment = cmp::max(layout.alignment(), self.atom_size); + let alignment = layout.alignment(); let mut state = self.state.lock(); unsafe { @@ -1077,18 +361,22 @@ unsafe impl Suballocator for Arc { // `DeviceLayout::MAX_SIZE`. let mut offset = align_up(suballoc.offset, alignment); - if let Some(prev_id) = suballoc.prev { - let prev = state.nodes.get(prev_id); + if buffer_image_granularity != DeviceAlignment::MIN { + debug_assert!(is_aligned(self.region_offset, buffer_image_granularity)); - if are_blocks_on_same_page( - prev.offset, - prev.size, - offset, - self.buffer_image_granularity, - ) && has_granularity_conflict(prev.ty, allocation_type) - { - // This is overflow-safe for the same reason as above. - offset = align_up(offset, self.buffer_image_granularity); + if let Some(prev_id) = suballoc.prev { + let prev = state.nodes.get(prev_id); + + if are_blocks_on_same_page( + prev.offset, + prev.size, + offset, + buffer_image_granularity, + ) && has_granularity_conflict(prev.ty, allocation_type) + { + // This is overflow-safe for the same reason as above. + offset = align_up(offset, buffer_image_granularity); + } } } @@ -1106,15 +394,10 @@ unsafe impl Suballocator for Arc { // constrained by the remaining size of the region. self.free_size.fetch_sub(size, Ordering::Release); - return Ok(MemoryAlloc { + return Ok(Suballocation { offset, size, - allocation_type, - atom_size: self.region.atom_size, - parent: AllocParent::FreeList { - allocator: self.clone(), - id, - }, + handle: AllocationHandle(id.get() as _), }); } } @@ -1133,13 +416,23 @@ unsafe impl Suballocator for Arc { } #[inline] - fn region(&self) -> &MemoryAlloc { - &self.region - } + unsafe fn deallocate(&self, suballocation: Suballocation) { + // SAFETY: The caller must guarantee that `suballocation` refers to a currently allocated + // allocation of `self`. + let node_id = SlotId::new(suballocation.handle.0 as _); - #[inline] - fn try_into_region(self) -> Result { - Arc::try_unwrap(self).map(|allocator| allocator.region) + let mut state = self.state.lock(); + let node = state.nodes.get_mut(node_id); + + debug_assert!(node.ty != SuballocationType::Free); + + // Suballocation sizes are constrained by the size of the region, so they can't possibly + // overflow when added up. 
+ self.free_size.fetch_add(node.size, Ordering::Release); + + node.ty = SuballocationType::Free; + state.coalesce(node_id); + state.free(node_id); } #[inline] @@ -1151,13 +444,6 @@ unsafe impl Suballocator for Arc { fn cleanup(&mut self) {} } -unsafe impl DeviceOwned for FreeListAllocator { - #[inline] - fn device(&self) -> &Arc { - self.device_memory.device() - } -} - #[derive(Debug)] struct FreeListAllocatorState { nodes: host::PoolAllocator, @@ -1418,8 +704,8 @@ impl FreeListAllocatorState { /// wasting 45% of the memory. Use this algorithm if you need to create and free a lot of /// allocations, which would cause too much external fragmentation when using /// [`FreeListAllocator`]. However, if the sizes of your allocations are more or less the same, -/// then the [`PoolAllocator`] would be a better choice and would eliminate external fragmentation -/// completely. +/// then using an allocation pool would be a better choice and would eliminate external +/// fragmentation completely. /// /// See also [the `Suballocator` implementation]. /// @@ -1443,8 +729,8 @@ impl FreeListAllocatorState { /// /// It's safe to say that this algorithm works best if you have some level of control over your /// allocation sizes, so that you don't end up allocating twice as much memory. An example of this -/// would be when you need to allocate regions for other allocators, such as the `PoolAllocator` or -/// the [`BumpAllocator`]. +/// would be when you need to allocate regions for other allocators, such as for an allocation pool +/// or the [`BumpAllocator`]. /// /// # Efficiency /// @@ -1453,45 +739,14 @@ impl FreeListAllocatorState { /// freeing is *O*(*m*) in the worst case where *m* is the highest order, which equates to *O*(log /// (*n*)) where *n* is the size of the region. /// -/// # Examples -/// -/// Basic usage together with [`GenericMemoryAllocator`], to allocate resources that have a -/// moderately low life span (for example if you have a lot of images, each of which needs to be -/// resized every now and then): -/// -/// ``` -/// use std::sync::Arc; -/// use vulkano::memory::allocator::{ -/// BuddyAllocator, GenericMemoryAllocator, GenericMemoryAllocatorCreateInfo, -/// }; -/// -/// # let device: std::sync::Arc = return; -/// let memory_allocator = GenericMemoryAllocator::>::new( -/// device.clone(), -/// GenericMemoryAllocatorCreateInfo { -/// // Your block sizes must be powers of two, because `BuddyAllocator` only accepts -/// // power-of-two-sized regions. -/// block_sizes: &[(0, 64 * 1024 * 1024)], -/// ..Default::default() -/// }, -/// ) -/// .unwrap(); -/// -/// // Now you can use `memory_allocator` to allocate whatever it is you need. -/// ``` -/// /// [suballocator]: Suballocator /// [internal fragmentation]: super#internal-fragmentation /// [external fragmentation]: super#external-fragmentation /// [the `Suballocator` implementation]: Suballocator#impl-Suballocator-for-Arc /// [region]: Suballocator#regions -/// [`GenericMemoryAllocator`]: super::GenericMemoryAllocator #[derive(Debug)] pub struct BuddyAllocator { - region: MemoryAlloc, - device_memory: Arc, - buffer_image_granularity: DeviceAlignment, - atom_size: DeviceAlignment, + region_offset: DeviceSize, // Total memory remaining in the region. free_size: AtomicU64, state: Mutex, @@ -1503,133 +758,53 @@ impl BuddyAllocator { /// Arbitrary maximum number of orders, used to avoid a 2D `Vec`. Together with a minimum node /// size of 16, this is enough for a 64GiB region. 
const MAX_ORDERS: usize = 32; +} + +unsafe impl Suballocator for BuddyAllocator { + const IS_BLOCKING: bool = true; + + const NEEDS_CLEANUP: bool = false; /// Creates a new `BuddyAllocator` for the given [region]. /// /// # Panics /// - /// - Panics if `region.allocation_type` is not [`AllocationType::Unknown`]. This is done to - /// avoid checking for a special case of [buffer-image granularity] conflict. - /// - Panics if `region.size` is not a power of two. - /// - Panics if `region.size` is not in the range \[16B, 64GiB\]. - /// - Panics if `region` is a [dedicated allocation]. + /// - Panics if `region_size` is not a power of two. + /// - Panics if `region_size` is not in the range \[16B, 64GiB\]. /// /// [region]: Suballocator#regions - /// [buffer-image granularity]: super#buffer-image-granularity - /// [dedicated allocation]: MemoryAlloc::is_dedicated - #[inline] - pub fn new(region: MemoryAlloc) -> Arc { + fn new(region_offset: DeviceSize, region_size: DeviceSize) -> Self { const EMPTY_FREE_LIST: Vec = Vec::new(); - assert!(region.allocation_type == AllocationType::Unknown); - assert!(region.size.is_power_of_two()); - assert!(region.size >= BuddyAllocator::MIN_NODE_SIZE); + assert!(region_size.is_power_of_two()); + assert!(region_size >= BuddyAllocator::MIN_NODE_SIZE); - let max_order = (region.size / BuddyAllocator::MIN_NODE_SIZE).trailing_zeros() as usize; + let max_order = (region_size / BuddyAllocator::MIN_NODE_SIZE).trailing_zeros() as usize; assert!(max_order < BuddyAllocator::MAX_ORDERS); - let device_memory = region - .root() - .expect("dedicated allocations can't be suballocated") - .clone(); - let buffer_image_granularity = device_memory - .device() - .physical_device() - .properties() - .buffer_image_granularity; - let atom_size = region.atom_size.unwrap_or(DeviceAlignment::MIN); - let free_size = AtomicU64::new(region.size); + let free_size = AtomicU64::new(region_size); let mut free_list = ArrayVec::new(max_order + 1, [EMPTY_FREE_LIST; BuddyAllocator::MAX_ORDERS]); // The root node has the lowest offset and highest order, so it's the whole region. - free_list[max_order].push(region.offset); + free_list[max_order].push(region_offset); let state = Mutex::new(BuddyAllocatorState { free_list }); - Arc::new(BuddyAllocator { - region, - device_memory, - buffer_image_granularity, - atom_size, + BuddyAllocator { + region_offset, free_size, state, - }) - } - - /// # Safety - /// - /// - `order` and `offset` must refer to an occupied suballocation allocated by `self`. - unsafe fn free(&self, order: usize, mut offset: DeviceSize) { - let min_order = order; - let mut state = self.state.lock(); - - debug_assert!(!state.free_list[order].contains(&offset)); - - // Try to coalesce nodes while incrementing the order. - for (order, free_list) in state.free_list.iter_mut().enumerate().skip(min_order) { - // This can't discard any bits because `order` is confined to the range - // [0, log(region.size / BuddyAllocator::MIN_NODE_SIZE)]. - let size = BuddyAllocator::MIN_NODE_SIZE << order; - - // This can't overflow because the offsets in the free-list are confined to the range - // [region.offset, region.offset + region.size). - let buddy_offset = ((offset - self.region.offset) ^ size) + self.region.offset; - - match free_list.binary_search(&buddy_offset) { - // If the buddy is in the free-list, we can coalesce. - Ok(index) => { - free_list.remove(index); - offset = cmp::min(offset, buddy_offset); - } - // Otherwise free the node. 
- Err(_) => { - let (Ok(index) | Err(index)) = free_list.binary_search(&offset); - free_list.insert(index, offset); - - // This can't discard any bits for the same reason as above. - let size = BuddyAllocator::MIN_NODE_SIZE << min_order; - - // The sizes of suballocations allocated by `self` are constrained by that of - // its region, so they can't possibly overflow when added up. - self.free_size.fetch_add(size, Ordering::Release); - - break; - } - } } } -} -unsafe impl Suballocator for Arc { - const IS_BLOCKING: bool = true; - - const NEEDS_CLEANUP: bool = false; - - #[inline] - fn new(region: MemoryAlloc) -> Self { - BuddyAllocator::new(region) - } - - /// Creates a new suballocation within the [region]. - /// - /// # Errors - /// - /// - Returns [`OutOfRegionMemory`] if there are no free nodes large enough so satisfy the - /// request. - /// - Returns [`FragmentedRegion`] if a node large enough to satisfy the request could have - /// been formed, but wasn't because of [external fragmentation]. - /// - /// [region]: Suballocator#regions - /// [`allocate`]: Suballocator::allocate - /// [`OutOfRegionMemory`]: SuballocatorError::OutOfRegionMemory - /// [`FragmentedRegion`]: SuballocatorError::FragmentedRegion - /// [external fragmentation]: super#external-fragmentation #[inline] fn allocate( &self, - create_info: SuballocationCreateInfo, - ) -> Result { + layout: DeviceLayout, + allocation_type: AllocationType, + buffer_image_granularity: DeviceAlignment, + ) -> Result { /// Returns the largest power of two smaller or equal to the input, or zero if the input is /// zero. fn prev_power_of_two(val: DeviceSize) -> DeviceSize { @@ -1644,22 +819,20 @@ unsafe impl Suballocator for Arc { } } - let SuballocationCreateInfo { - layout, - allocation_type, - _ne: _, - } = create_info; - let mut size = layout.size(); - let mut alignment = cmp::max(layout.alignment(), self.atom_size); + let mut alignment = layout.alignment(); - if allocation_type == AllocationType::Unknown - || allocation_type == AllocationType::NonLinear - { - // This can't overflow because `DeviceLayout` guarantees that `size` doesn't exceed - // `DeviceLayout::MAX_SIZE`. - size = align_up(size, self.buffer_image_granularity); - alignment = cmp::max(alignment, self.buffer_image_granularity); + if buffer_image_granularity != DeviceAlignment::MIN { + debug_assert!(is_aligned(self.region_offset, buffer_image_granularity)); + + if allocation_type == AllocationType::Unknown + || allocation_type == AllocationType::NonLinear + { + // This can't overflow because `DeviceLayout` guarantees that `size` doesn't exceed + // `DeviceLayout::MAX_SIZE`. + size = align_up(size, buffer_image_granularity); + alignment = cmp::max(alignment, buffer_image_granularity); + } } // `DeviceLayout` guarantees that its size does not exceed `DeviceLayout::MAX_SIZE`, @@ -1704,16 +877,10 @@ unsafe impl Suballocator for Arc { // constrained by the remaining size of the region. self.free_size.fetch_sub(size, Ordering::Release); - return Ok(MemoryAlloc { + return Ok(Suballocation { offset, size: layout.size(), - allocation_type, - atom_size: self.region.atom_size, - parent: AllocParent::Buddy { - allocator: self.clone(), - order: min_order, - offset, // The offset in the alloc itself can change. 
- }, + handle: AllocationHandle(min_order as _), }); } } @@ -1728,13 +895,47 @@ unsafe impl Suballocator for Arc { } #[inline] - fn region(&self) -> &MemoryAlloc { - &self.region - } + unsafe fn deallocate(&self, suballocation: Suballocation) { + let mut offset = suballocation.offset; + let order = suballocation.handle.0 as usize; - #[inline] - fn try_into_region(self) -> Result { - Arc::try_unwrap(self).map(|allocator| allocator.region) + let min_order = order; + let mut state = self.state.lock(); + + debug_assert!(!state.free_list[order].contains(&offset)); + + // Try to coalesce nodes while incrementing the order. + for (order, free_list) in state.free_list.iter_mut().enumerate().skip(min_order) { + // This can't discard any bits because `order` is confined to the range + // [0, log(region.size / BuddyAllocator::MIN_NODE_SIZE)]. + let size = BuddyAllocator::MIN_NODE_SIZE << order; + + // This can't overflow because the offsets in the free-list are confined to the range + // [region.offset, region.offset + region.size). + let buddy_offset = ((offset - self.region_offset) ^ size) + self.region_offset; + + match free_list.binary_search(&buddy_offset) { + // If the buddy is in the free-list, we can coalesce. + Ok(index) => { + free_list.remove(index); + offset = cmp::min(offset, buddy_offset); + } + // Otherwise free the node. + Err(_) => { + let (Ok(index) | Err(index)) = free_list.binary_search(&offset); + free_list.insert(index, offset); + + // This can't discard any bits for the same reason as above. + let size = BuddyAllocator::MIN_NODE_SIZE << min_order; + + // The sizes of suballocations allocated by `self` are constrained by that of + // its region, so they can't possibly overflow when added up. + self.free_size.fetch_add(size, Ordering::Release); + + break; + } + } + } } /// Returns the total amount of free space left in the [region] that is available to the @@ -1751,13 +952,6 @@ unsafe impl Suballocator for Arc { fn cleanup(&mut self) {} } -unsafe impl DeviceOwned for BuddyAllocator { - #[inline] - fn device(&self) -> &Arc { - self.device_memory.device() - } -} - #[derive(Debug)] struct BuddyAllocatorState { // Every order has its own free-list for convenience, so that we don't have to traverse a tree. @@ -1766,352 +960,6 @@ struct BuddyAllocatorState { free_list: ArrayVec, { BuddyAllocator::MAX_ORDERS }>, } -/// A [suballocator] using a pool of fixed-size blocks as a [free-list]. -/// -/// Since the size of the blocks is fixed, you can not create allocations bigger than that. You can -/// create smaller ones, though, which leads to more and more [internal fragmentation] the smaller -/// the allocations get. This is generally a good trade-off, as internal fragmentation is nowhere -/// near as hard to deal with as [external fragmentation]. -/// -/// See also [the `Suballocator` implementation]. -/// -/// # Algorithm -/// -/// The free-list contains indices of blocks in the region that are available, so allocation -/// consists merely of popping an index from the free-list. The same goes for freeing, all that is -/// required is to push the index of the block into the free-list. Note that this is only possible -/// because the blocks have a fixed size. Due to this one fact, the free-list doesn't need to be -/// sorted or traversed. As long as there is a free block, it will do, no matter which block it is. 
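The surrounding documentation describes the fixed-block scheme of the now-removed `PoolAllocator`, which users are instead pointed toward hand-rolled allocation pools for. Purely as a standalone illustration of that scheme, and not any type vulkano provides, a minimal host-side pool could look like this (every name here is hypothetical):

```rust
use vulkano::DeviceSize;

/// A toy fixed-block pool: popping and pushing indices is the entire algorithm.
struct FixedBlockPool {
    region_offset: DeviceSize,
    block_size: DeviceSize,
    // Unsorted free block indices; any free block is as good as any other.
    free_blocks: Vec<DeviceSize>,
}

impl FixedBlockPool {
    fn new(region_offset: DeviceSize, region_size: DeviceSize, block_size: DeviceSize) -> Self {
        FixedBlockPool {
            region_offset,
            block_size,
            free_blocks: (0..region_size / block_size).collect(),
        }
    }

    /// O(1): pop any index and turn it into an absolute offset.
    fn allocate(&mut self) -> Option<DeviceSize> {
        let index = self.free_blocks.pop()?;
        Some(self.region_offset + index * self.block_size)
    }

    /// O(1): freeing is nothing more than remembering the index again.
    fn deallocate(&mut self, offset: DeviceSize) {
        self.free_blocks.push((offset - self.region_offset) / self.block_size);
    }
}
```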
-/// -/// Since the `PoolAllocator` doesn't keep a list of suballocations that are currently in use, -/// resolving [buffer-image granularity] conflicts on a case-by-case basis is not possible. -/// Therefore, it is an all or nothing situation: -/// -/// - you use the allocator for only one type of allocation, [`Linear`] or [`NonLinear`], or -/// - you allow both but align the blocks to the granularity so that no conflics can happen. -/// -/// The way this is done is that every suballocation inherits the allocation type of the region. -/// The latter is done by using a region whose allocation type is [`Unknown`]. You are discouraged -/// from using this type if you can avoid it. -/// -/// The block size can end up bigger than specified if the allocator is created with a region whose -/// allocation type is `Unknown`. In that case all blocks are aligned to the buffer-image -/// granularity, which may or may not cause signifficant memory usage increase. Say for example -/// your driver reports a granularity of 4KiB. If you need a block size of 8KiB, you would waste no -/// memory. On the other hand, if you needed a block size of 6KiB, you would be wasting 25% of the -/// memory. In such a scenario you are highly encouraged to use a different allocation type. -/// -/// The reverse is also true: with an allocation type other than `Unknown`, not all memory within a -/// block may be usable depending on the requested [suballocation]. For instance, with a block size -/// of 1152B (9 * 128B) and a suballocation with `alignment: 256`, a block at an odd index could -/// not utilize its first 128B, reducing its effective size to 1024B. This is usually only relevant -/// with small block sizes, as [alignment requirements] are usually rather small, but it completely -/// depends on the resource and driver. -/// -/// In summary, the block size you choose has a signifficant impact on internal fragmentation due -/// to the two reasons described above. You need to choose your block size carefully, *especially* -/// if you require small allocations. Some rough guidelines: -/// -/// - Always [align] your blocks to a sufficiently large power of 2. This does **not** mean your -/// block size must be a power of two. For example with a block size of 3KiB, your blocks would -/// be aligned to 1KiB. -/// - Prefer not using the allocation type `Unknown`. You can always create as many -/// `PoolAllocator`s as you like for different allocation types and sizes, and they can all work -/// within the same memory block. You should be safe from fragmentation if your blocks are -/// aligned to 1KiB. -/// - If you must use the allocation type `Unknown`, then you should be safe from fragmentation on -/// pretty much any driver if your blocks are aligned to 64KiB. Keep in mind that this might -/// change any time as new devices appear or new drivers come out. Always look at the properties -/// of the devices you want to support before relying on any such data. -/// -/// # Efficiency -/// -/// In theory, a pool allocator is the ideal one because it causes no external fragmentation, and -/// both allocation and freeing is *O*(1). It also never needs to lock and hence also lends itself -/// perfectly to concurrency. But of course, there is the trade-off that block sizes are not -/// dynamic. -/// -/// As you can imagine, the `PoolAllocator` is the perfect fit if you know the sizes of the -/// allocations you will be making, and they are more or less in the same size class. 
But this -/// allocation algorithm really shines when combined with others, as most do. For one, nothing is -/// stopping you from having multiple `PoolAllocator`s for many different size classes. You could -/// consider a pool of pools, by layering `PoolAllocator` with itself, but this would have the -/// downside that the regions of the pools for all size classes would have to match. Usually this -/// is not desired. If you want pools for different size classes to all have about the same number -/// of blocks, or you even know that some size classes require more or less blocks (because of how -/// many resources you will be allocating for each), then you need an allocator that can allocate -/// regions of different sizes. You can use the [`FreeListAllocator`] for this, if external -/// fragmentation is not an issue, otherwise you might consider using the [`BuddyAllocator`]. On -/// the other hand, you might also want to consider having a `PoolAllocator` at the top of a -/// [hierarchy]. Again, this allocator never needs to lock making it *the* perfect fit for a global -/// concurrent allocator, which hands out large regions which can then be suballocated locally on a -/// thread, by the [`BumpAllocator`] for example. -/// -/// # Examples -/// -/// Basic usage together with [`GenericMemoryAllocator`]: -/// -/// ``` -/// use std::sync::Arc; -/// use vulkano::memory::allocator::{ -/// GenericMemoryAllocator, GenericMemoryAllocatorCreateInfo, PoolAllocator, -/// }; -/// -/// # let device: std::sync::Arc = return; -/// let memory_allocator = GenericMemoryAllocator::>>::new( -/// device.clone(), -/// GenericMemoryAllocatorCreateInfo { -/// block_sizes: &[(0, 64 * 1024 * 1024)], -/// ..Default::default() -/// }, -/// ) -/// .unwrap(); -/// -/// // Now you can use `memory_allocator` to allocate whatever it is you need. -/// ``` -/// -/// [suballocator]: Suballocator -/// [free-list]: Suballocator#free-lists -/// [internal fragmentation]: super#internal-fragmentation -/// [external fragmentation]: super#external-fragmentation -/// [the `Suballocator` implementation]: Suballocator#impl-Suballocator-for-Arc> -/// [region]: Suballocator#regions -/// [buffer-image granularity]: super#buffer-image-granularity -/// [`Linear`]: AllocationType::Linear -/// [`NonLinear`]: AllocationType::NonLinear -/// [`Unknown`]: AllocationType::Unknown -/// [suballocation]: SuballocationCreateInfo -/// [alignment requirements]: super#memory-requirements -/// [align]: super#alignment -/// [hierarchy]: Suballocator#memory-hierarchies -/// [`GenericMemoryAllocator`]: super::GenericMemoryAllocator -#[derive(Debug)] -#[repr(transparent)] -pub struct PoolAllocator { - inner: PoolAllocatorInner, -} - -impl PoolAllocator { - /// Creates a new `PoolAllocator` for the given [region]. - /// - /// # Panics - /// - /// - Panics if `region.size < BLOCK_SIZE`. - /// - Panics if `region` is a [dedicated allocation]. - /// - /// [region]: Suballocator#regions - /// [dedicated allocation]: MemoryAlloc::is_dedicated - #[inline] - pub fn new( - region: MemoryAlloc, - #[cfg(test)] buffer_image_granularity: DeviceAlignment, - ) -> Arc { - Arc::new(PoolAllocator { - inner: PoolAllocatorInner::new( - region, - BLOCK_SIZE, - #[cfg(test)] - buffer_image_granularity, - ), - }) - } - - /// Size of a block. Can be bigger than `BLOCK_SIZE` due to alignment requirements. - #[inline] - pub fn block_size(&self) -> DeviceSize { - self.inner.block_size - } - - /// Total number of blocks available to the allocator. 
This is always equal to - /// `self.region().size() / self.block_size()`. - #[inline] - pub fn block_count(&self) -> usize { - self.inner.free_list.capacity() - } - - /// Number of free blocks. - #[inline] - pub fn free_count(&self) -> usize { - self.inner.free_list.len() - } -} - -unsafe impl Suballocator for Arc> { - const IS_BLOCKING: bool = false; - - const NEEDS_CLEANUP: bool = false; - - #[inline] - fn new(region: MemoryAlloc) -> Self { - PoolAllocator::new( - region, - #[cfg(test)] - DeviceAlignment::MIN, - ) - } - - /// Creates a new suballocation within the [region]. - /// - /// > **Note**: `create_info.allocation_type` is silently ignored because all suballocations - /// > inherit the allocation type from the region. - /// - /// # Errors - /// - /// - Returns [`OutOfRegionMemory`] if the [free-list] is empty. - /// - Returns [`OutOfRegionMemory`] if the allocation can't fit inside a block. Only the first - /// block in the free-list is tried, which means that if one block isn't usable due to - /// [internal fragmentation] but a different one would be, you still get this error. See the - /// [type-level documentation] for details on how to properly configure your allocator. - /// - Returns [`BlockSizeExceeded`] if `create_info.size` exceeds `BLOCK_SIZE`. - /// - /// [region]: Suballocator#regions - /// [`allocate`]: Suballocator::allocate - /// [`OutOfRegionMemory`]: SuballocatorError::OutOfRegionMemory - /// [free-list]: Suballocator#free-lists - /// [internal fragmentation]: super#internal-fragmentation - /// [type-level documentation]: PoolAllocator - /// [`BlockSizeExceeded`]: SuballocatorError::BlockSizeExceeded - #[inline] - fn allocate( - &self, - create_info: SuballocationCreateInfo, - ) -> Result { - // SAFETY: `PoolAllocator` and `PoolAllocatorInner` have the same layout. - // - // This is not quite optimal, because we are always cloning the `Arc` even if allocation - // fails, in which case the `Arc` gets cloned and dropped for no reason. Unfortunately, - // there is currently no way to turn `&Arc` into `&Arc` that is sound. - unsafe { Arc::from_raw(Arc::into_raw(self.clone()).cast::()) } - .allocate(create_info) - } - - #[inline] - fn region(&self) -> &MemoryAlloc { - &self.inner.region - } - - #[inline] - fn try_into_region(self) -> Result { - Arc::try_unwrap(self).map(|allocator| allocator.inner.region) - } - - #[inline] - fn free_size(&self) -> DeviceSize { - self.free_count() as DeviceSize * self.block_size() - } - - #[inline] - fn cleanup(&mut self) {} -} - -unsafe impl DeviceOwned for PoolAllocator { - #[inline] - fn device(&self) -> &Arc { - self.inner.device_memory.device() - } -} - -#[derive(Debug)] -struct PoolAllocatorInner { - region: MemoryAlloc, - device_memory: Arc, - atom_size: DeviceAlignment, - block_size: DeviceSize, - // Unsorted list of free block indices. 
- free_list: ArrayQueue, -} - -impl PoolAllocatorInner { - fn new( - region: MemoryAlloc, - mut block_size: DeviceSize, - #[cfg(test)] buffer_image_granularity: DeviceAlignment, - ) -> Self { - let device_memory = region - .root() - .expect("dedicated allocations can't be suballocated") - .clone(); - #[cfg(not(test))] - let buffer_image_granularity = device_memory - .device() - .physical_device() - .properties() - .buffer_image_granularity; - let atom_size = region.atom_size.unwrap_or(DeviceAlignment::MIN); - if region.allocation_type == AllocationType::Unknown { - block_size = align_up(block_size, buffer_image_granularity); - } - - let block_count = region.size / block_size; - let free_list = ArrayQueue::new(block_count as usize); - for i in 0..block_count { - free_list.push(i).unwrap(); - } - - PoolAllocatorInner { - region, - device_memory, - atom_size, - block_size, - free_list, - } - } - - fn allocate( - self: Arc, - create_info: SuballocationCreateInfo, - ) -> Result { - let SuballocationCreateInfo { - layout, - allocation_type: _, - _ne: _, - } = create_info; - - let size = layout.size(); - let alignment = cmp::max(layout.alignment(), self.atom_size); - let index = self - .free_list - .pop() - .ok_or(SuballocatorError::OutOfRegionMemory)?; - - // Indices in the free-list are confined to the range [0, region.size / block_size], so - // this can't overflow. - let relative_offset = index * self.block_size; - // This can't overflow because offsets are confined to the size of the root allocation, - // which can itself not exceed `DeviceLayout::MAX_SIZE`. - let offset = align_up(self.region.offset + relative_offset, alignment); - - if offset + size > self.region.offset + relative_offset + self.block_size { - let _ = self.free_list.push(index); - - return if size > self.block_size { - Err(SuballocatorError::BlockSizeExceeded) - } else { - // There is not enough space due to alignment requirements. - Err(SuballocatorError::OutOfRegionMemory) - }; - } - - Ok(MemoryAlloc { - offset, - size, - allocation_type: self.region.allocation_type, - atom_size: self.region.atom_size, - parent: AllocParent::Pool { - allocator: self, - index, - }, - }) - } - - /// # Safety - /// - /// - `index` must refer to an occupied suballocation allocated by `self`. - unsafe fn free(&self, index: DeviceSize) { - let _ = self.free_list.push(index); - } -} - /// A [suballocator] which can allocate dynamically, but can only free all allocations at once. /// /// With bump allocation, the used up space increases linearly as allocations are made and @@ -2137,12 +985,9 @@ impl PoolAllocatorInner { /// used wrong, and is very susceptible to [memory leaks]. /// /// Once you know that you are done with the allocations, meaning you know they have all been -/// dropped, you can safely reset the allocator using the [`try_reset`] method as long as the -/// allocator is not shared between threads. It is hard to safely reset a bump allocator that is -/// used concurrently. In such a scenario it's best not to reset it at all and instead drop it once -/// it reaches the end of the [region], freeing the region to a higher level in the [hierarchy] -/// once all threads have dropped their reference to the allocator. This is one of the reasons you -/// are generally advised to use one `BumpAllocator` per thread if you can. +/// dropped, you can safely reset the allocator using the [`reset`] method as long as the allocator +/// is not shared between threads. 
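A sketch of that lifecycle: `reset` takes `&mut self`, so exclusive access, and with it the absence of concurrent users, is enforced statically. The arithmetic below assumes no buffer-image granularity padding, since all allocations share one allocation type.

```rust
use vulkano::memory::allocator::{
    AllocationType, BumpAllocator, DeviceAlignment, DeviceLayout, Suballocator,
};

let mut allocator = BumpAllocator::new(0, 64 * 1024);
let layout = DeviceLayout::from_size_alignment(1024, 8).unwrap();

for _ in 0..8 {
    allocator
        .allocate(layout, AllocationType::Linear, DeviceAlignment::MIN)
        .unwrap();
}

// Nothing has been freed: the bump allocator only ever moves forward.
assert_eq!(allocator.free_size(), 64 * 1024 - 8 * 1024);

// Once all of those allocations are known to be dead, rewind in one go.
allocator.reset();
assert_eq!(allocator.free_size(), 64 * 1024);
```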
This is one of the reasons you are generally advised to use one +/// `BumpAllocator` per thread if you can. /// /// # Efficiency /// @@ -2163,119 +1008,56 @@ impl PoolAllocatorInner { /// [region]: Suballocator#regions /// [free-list]: Suballocator#free-lists /// [memory leaks]: super#leakage -/// [`try_reset`]: Self::try_reset +/// [`reset`]: Self::reset /// [hierarchy]: Suballocator#memory-hierarchies #[derive(Debug)] pub struct BumpAllocator { - region: MemoryAlloc, - device_memory: Arc, - buffer_image_granularity: DeviceAlignment, - atom_size: DeviceAlignment, + region_offset: DeviceSize, + region_size: DeviceSize, // Encodes the previous allocation type in the 2 least signifficant bits and the free start in // the rest. state: AtomicU64, } impl BumpAllocator { - /// Creates a new `BumpAllocator` for the given [region]. - /// - /// # Panics - /// - /// - Panics if `region` is a [dedicated allocation]. - /// - Panics if `region.size` exceeds `DeviceLayout::MAX_SIZE >> 2`. - /// - /// [region]: Suballocator#regions - /// [dedicated allocation]: MemoryAlloc::is_dedicated - pub fn new(region: MemoryAlloc) -> Arc { - // Sanity check: this would lead to UB because of the left-shifting by 2 needed to encode - // the free-start into the state. - assert!(region.size <= (DeviceLayout::MAX_SIZE >> 2)); - - let device_memory = region - .root() - .expect("dedicated allocations can't be suballocated") - .clone(); - let buffer_image_granularity = device_memory - .device() - .physical_device() - .properties() - .buffer_image_granularity; - let atom_size = region.atom_size.unwrap_or(DeviceAlignment::MIN); - let state = AtomicU64::new(region.allocation_type as DeviceSize); - - Arc::new(BumpAllocator { - region, - device_memory, - buffer_image_granularity, - atom_size, - state, - }) - } - - /// Resets the free-start back to the beginning of the [region] if there are no other strong - /// references to the allocator. + /// Resets the free-start back to the beginning of the [region]. /// /// [region]: Suballocator#regions #[inline] - pub fn try_reset(self: &mut Arc) -> Result<(), BumpAllocatorResetError> { - Arc::get_mut(self) - .map(|allocator| { - *allocator.state.get_mut() = allocator.region.allocation_type as DeviceSize; - }) - .ok_or(BumpAllocatorResetError) - } - - /// Resets the free-start to the beginning of the [region] without checking if there are other - /// strong references to the allocator. - /// - /// This could be useful if you cloned the [`Arc`] yourself, and can guarantee that no - /// allocations currently hold a reference to it. - /// - /// As a safe alternative, you can let the `Arc` do all the work. Simply drop it once it - /// reaches the end of the region. After all threads do that, the region will be freed to the - /// next level up the [hierarchy]. If you only use the allocator on one thread and need shared - /// ownership, you can use `Rc>>` together with [`try_reset`] for a - /// safe alternative as well. - /// - /// # Safety - /// - /// - All allocations made with the allocator must have been dropped. 
- /// - /// [region]: Suballocator#regions - /// [hierarchy]: Suballocator#memory-hierarchies - /// [`try_reset`]: Self::try_reset - #[inline] - pub unsafe fn reset_unchecked(&self) { - self.state - .store(self.region.allocation_type as DeviceSize, Ordering::Release); + pub fn reset(&mut self) { + *self.state.get_mut() = AllocationType::Unknown as DeviceSize; } } -unsafe impl Suballocator for Arc { +unsafe impl Suballocator for BumpAllocator { const IS_BLOCKING: bool = false; const NEEDS_CLEANUP: bool = true; - #[inline] - fn new(region: MemoryAlloc) -> Self { - BumpAllocator::new(region) - } - - /// Creates a new suballocation within the [region]. - /// - /// # Errors - /// - /// - Returns [`OutOfRegionMemory`] if the requested allocation can't fit in the free space - /// remaining in the region. + /// Creates a new `BumpAllocator` for the given [region]. /// /// [region]: Suballocator#regions - /// [`allocate`]: Suballocator::allocate - /// [`OutOfRegionMemory`]: SuballocatorError::OutOfRegionMemory + fn new(region_offset: DeviceSize, region_size: DeviceSize) -> Self { + // Sanity check: this would lead to UB because of the left-shifting by 2 needed to encode + // the free-start into the state. + assert!(region_size <= (DeviceLayout::MAX_SIZE >> 2)); + + let state = AtomicU64::new(AllocationType::Unknown as DeviceSize); + + BumpAllocator { + region_offset, + region_size, + state, + } + } + #[inline] fn allocate( &self, - create_info: SuballocationCreateInfo, - ) -> Result { + layout: DeviceLayout, + allocation_type: AllocationType, + buffer_image_granularity: DeviceAlignment, + ) -> Result { const SPIN_LIMIT: u32 = 6; // NOTE(Marc): The following code is a minimal version `Backoff` taken from @@ -2307,47 +1089,44 @@ unsafe impl Suballocator for Arc { prev_ty == AllocationType::Unknown || prev_ty != ty } - let SuballocationCreateInfo { - layout, - allocation_type, - _ne: _, - } = create_info; - let size = layout.size(); - let alignment = cmp::max(layout.alignment(), self.atom_size); + let alignment = layout.alignment(); let backoff = Backoff::new(); let mut state = self.state.load(Ordering::Relaxed); loop { let free_start = state >> 2; - let prev_alloc_type = match state & 0b11 { - 0 => AllocationType::Unknown, - 1 => AllocationType::Linear, - 2 => AllocationType::NonLinear, - _ => unreachable!(), - }; // These can't overflow because offsets are constrained by the size of the root // allocation, which can itself not exceed `DeviceLayout::MAX_SIZE`. 
- let prev_end = self.region.offset + free_start; + let prev_end = self.region_offset + free_start; let mut offset = align_up(prev_end, alignment); - if prev_end > 0 - && are_blocks_on_same_page(0, prev_end, offset, self.buffer_image_granularity) - && has_granularity_conflict(prev_alloc_type, allocation_type) - { - offset = align_up(offset, self.buffer_image_granularity); + if buffer_image_granularity != DeviceAlignment::MIN { + let prev_alloc_type = match state & 0b11 { + 0 => AllocationType::Unknown, + 1 => AllocationType::Linear, + 2 => AllocationType::NonLinear, + _ => unreachable!(), + }; + + if prev_end > 0 + && are_blocks_on_same_page(0, prev_end, offset, buffer_image_granularity) + && has_granularity_conflict(prev_alloc_type, allocation_type) + { + offset = align_up(offset, buffer_image_granularity); + } } - let relative_offset = offset - self.region.offset; + let relative_offset = offset - self.region_offset; let free_start = relative_offset + size; - if free_start > self.region.size { + if free_start > self.region_size { return Err(SuballocatorError::OutOfRegionMemory); } - // This can't discard any bits because we checked that `region.size` does not exceed + // This can't discard any bits because we checked that `region_size` does not exceed // `DeviceLayout::MAX_SIZE >> 2`. let new_state = free_start << 2 | allocation_type as DeviceSize; @@ -2358,12 +1137,10 @@ unsafe impl Suballocator for Arc { Ordering::Relaxed, ) { Ok(_) => { - return Ok(MemoryAlloc { + return Ok(Suballocation { offset, size, - allocation_type, - atom_size: self.region.atom_size, - parent: AllocParent::Bump(self.clone()), + handle: AllocationHandle(ptr::null_mut()), }); } Err(new_state) => { @@ -2375,42 +1152,18 @@ unsafe impl Suballocator for Arc { } #[inline] - fn region(&self) -> &MemoryAlloc { - &self.region - } - - #[inline] - fn try_into_region(self) -> Result { - Arc::try_unwrap(self).map(|allocator| allocator.region) + unsafe fn deallocate(&self, _suballocation: Suballocation) { + // such complex, very wow } #[inline] fn free_size(&self) -> DeviceSize { - self.region.size - (self.state.load(Ordering::Acquire) >> 2) + self.region_size - (self.state.load(Ordering::Acquire) >> 2) } #[inline] fn cleanup(&mut self) { - let _ = self.try_reset(); - } -} - -unsafe impl DeviceOwned for BumpAllocator { - #[inline] - fn device(&self) -> &Arc { - self.device_memory.device() - } -} - -/// Error that can be returned when resetting the [`BumpAllocator`]. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct BumpAllocatorResetError; - -impl Error for BumpAllocatorResetError {} - -impl Display for BumpAllocatorResetError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_str("the allocator is still in use") + self.reset(); } } @@ -2439,8 +1192,8 @@ mod host { /// Allocates objects from a pool on the host, which has the following benefits: /// - /// - Allocation is much faster because there is no need to consult the global allocator or even - /// worse, the operating system, each time a small object needs to be created. + /// - Allocation is much faster because there is no need to consult the global allocator or + /// even worse, the operating system, each time a small object needs to be created. /// - Freeing is extremely fast, because the whole pool can be dropped at once. This is /// particularily useful for linked structures, whose nodes need to be freed one-by-one by /// traversing the whole structure otherwise. 
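In other words, this is the classic slab/arena pattern: all nodes live in one `Vec` and refer to each other by index rather than by pointer. A minimal standalone sketch of the idea follows; it is illustrative only and deliberately simpler than the internal type (no `SlotId` newtype, no unsafe accessors).

```rust
/// A toy host-side object pool in the same spirit as the module described above.
struct Pool<T> {
    slots: Vec<T>,
    free: Vec<usize>,
}

impl<T> Pool<T> {
    fn new() -> Self {
        Pool { slots: Vec::new(), free: Vec::new() }
    }

    /// Reuses a free slot if there is one, otherwise grows the backing `Vec`.
    fn allocate(&mut self, val: T) -> usize {
        if let Some(id) = self.free.pop() {
            self.slots[id] = val;
            id
        } else {
            self.slots.push(val);
            self.slots.len() - 1
        }
    }

    /// Freeing only records the index; the old value is overwritten on reuse,
    /// and everything is dropped at once when the pool itself is dropped.
    fn free(&mut self, id: usize) {
        self.free.push(id);
    }
}
```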
@@ -2467,7 +1220,8 @@ mod host { } } - /// Allocates a slot and initializes it with the provided value. Returns the ID of the slot. + /// Allocates a slot and initializes it with the provided value. Returns the ID of the + /// slot. pub fn allocate(&mut self, val: T) -> SlotId { if let Some(id) = self.free_list.pop() { *unsafe { self.get_mut(id) } = val; @@ -2530,14 +1284,36 @@ mod host { /// of the actual ID to this `host` module, making it easier to reason about unsafe code. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub(super) struct SlotId(NonZeroUsize); + + impl SlotId { + /// # Safety + /// + /// - `val` must have previously acquired through [`SlotId::get`]. + pub unsafe fn new(val: usize) -> Self { + SlotId(NonZeroUsize::new(val).unwrap()) + } + + pub fn get(self) -> usize { + self.0.get() + } + } } #[cfg(test)] mod tests { use super::*; - use crate::memory::MemoryAllocateInfo; + use crossbeam_queue::ArrayQueue; use std::thread; + const fn unwrap(opt: Option) -> T { + match opt { + Some(x) => x, + None => panic!(), + } + } + + const DUMMY_LAYOUT: DeviceLayout = unwrap(DeviceLayout::from_size_alignment(1, 1)); + #[test] fn free_list_allocator_capacity() { const THREADS: DeviceSize = 12; @@ -2546,7 +1322,7 @@ mod tests { const REGION_SIZE: DeviceSize = (ALLOCATION_STEP * (THREADS + 1) * THREADS / 2) * ALLOCATIONS_PER_THREAD; - let allocator = dummy_allocator!(FreeListAllocator, REGION_SIZE); + let allocator = FreeListAllocator::new(0, REGION_SIZE); let allocs = ArrayQueue::new((ALLOCATIONS_PER_THREAD * THREADS) as usize); // Using threads to randomize allocation order. @@ -2555,215 +1331,155 @@ mod tests { let (allocator, allocs) = (&allocator, &allocs); scope.spawn(move || { - let info = dummy_info!(i * ALLOCATION_STEP); + let layout = DeviceLayout::from_size_alignment(i * ALLOCATION_STEP, 1).unwrap(); for _ in 0..ALLOCATIONS_PER_THREAD { allocs - .push(allocator.allocate(info.clone()).unwrap()) + .push( + allocator + .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN) + .unwrap(), + ) .unwrap(); } }); } }); - assert!(allocator.allocate(dummy_info!()).is_err()); + assert!(allocator + .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN) + .is_err()); assert!(allocator.free_size() == 0); - drop(allocs); + for alloc in allocs { + unsafe { allocator.deallocate(alloc) }; + } + assert!(allocator.free_size() == REGION_SIZE); - assert!(allocator.allocate(dummy_info!(REGION_SIZE)).is_ok()); + let alloc = allocator + .allocate( + DeviceLayout::from_size_alignment(REGION_SIZE, 1).unwrap(), + AllocationType::Unknown, + DeviceAlignment::MIN, + ) + .unwrap(); + unsafe { allocator.deallocate(alloc) }; } #[test] fn free_list_allocator_respects_alignment() { const REGION_SIZE: DeviceSize = 10 * 256; + const LAYOUT: DeviceLayout = unwrap(DeviceLayout::from_size_alignment(1, 256)); - let info = dummy_info!(1, 256); - - let allocator = dummy_allocator!(FreeListAllocator, REGION_SIZE); + let allocator = FreeListAllocator::new(0, REGION_SIZE); let mut allocs = Vec::with_capacity(10); for _ in 0..10 { - allocs.push(allocator.allocate(info.clone()).unwrap()); + allocs.push( + allocator + .allocate(LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN) + .unwrap(), + ); } - assert!(allocator.allocate(info).is_err()); + assert!(allocator + .allocate(LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN) + .is_err()); assert!(allocator.free_size() == REGION_SIZE - 10); + + for alloc in allocs.drain(..) 
+            unsafe { allocator.deallocate(alloc) };
+        }
     }
 
     #[test]
     fn free_list_allocator_respects_granularity() {
-        const GRANULARITY: DeviceSize = 16;
-        const REGION_SIZE: DeviceSize = 2 * GRANULARITY;
+        const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(16));
+        const REGION_SIZE: DeviceSize = 2 * GRANULARITY.as_devicesize();
 
-        let allocator = dummy_allocator!(FreeListAllocator, REGION_SIZE, GRANULARITY);
-        let mut linear_allocs = Vec::with_capacity(GRANULARITY as usize);
-        let mut nonlinear_allocs = Vec::with_capacity(GRANULARITY as usize);
+        let allocator = FreeListAllocator::new(0, REGION_SIZE);
+        let mut linear_allocs = Vec::with_capacity(REGION_SIZE as usize / 2);
+        let mut nonlinear_allocs = Vec::with_capacity(REGION_SIZE as usize / 2);
 
         for i in 0..REGION_SIZE {
             if i % 2 == 0 {
-                linear_allocs.push(allocator.allocate(dummy_info_linear!()).unwrap());
+                linear_allocs.push(
+                    allocator
+                        .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
+                        .unwrap(),
+                );
             } else {
-                nonlinear_allocs.push(allocator.allocate(dummy_info_nonlinear!()).unwrap());
+                nonlinear_allocs.push(
+                    allocator
+                        .allocate(DUMMY_LAYOUT, AllocationType::NonLinear, GRANULARITY)
+                        .unwrap(),
+                );
             }
         }
 
-        assert!(allocator.allocate(dummy_info_linear!()).is_err());
+        assert!(allocator
+            .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
+            .is_err());
         assert!(allocator.free_size() == 0);
 
-        drop(linear_allocs);
-        assert!(allocator.allocate(dummy_info!(GRANULARITY)).is_ok());
+        for alloc in linear_allocs.drain(..) {
+            unsafe { allocator.deallocate(alloc) };
+        }
 
-        let _alloc = allocator.allocate(dummy_info!()).unwrap();
-        assert!(allocator.allocate(dummy_info!()).is_err());
-        assert!(allocator.allocate(dummy_info_linear!()).is_err());
-    }
-
-    #[test]
-    fn pool_allocator_capacity() {
-        const BLOCK_SIZE: DeviceSize = 1024;
-
-        fn dummy_allocator(
-            device: Arc<Device>,
-            allocation_size: DeviceSize,
-        ) -> Arc<PoolAllocator<BLOCK_SIZE>> {
-            let device_memory = DeviceMemory::allocate(
-                device,
-                MemoryAllocateInfo {
-                    allocation_size,
-                    memory_type_index: 0,
-                    ..Default::default()
-                },
+        let alloc = allocator
+            .allocate(
+                DeviceLayout::from_size_alignment(GRANULARITY.as_devicesize(), 1).unwrap(),
+                AllocationType::Unknown,
+                GRANULARITY,
             )
             .unwrap();
+        unsafe { allocator.deallocate(alloc) };
 
-            PoolAllocator::new(
-                MemoryAlloc::new(device_memory),
-                DeviceAlignment::new(1).unwrap(),
-            )
-        }
-
-        let (device, _) = gfx_dev_and_queue!();
-
-        assert_should_panic!({ dummy_allocator(device.clone(), BLOCK_SIZE - 1) });
-
-        let allocator = dummy_allocator(device.clone(), 2 * BLOCK_SIZE - 1);
-        {
-            let alloc = allocator.allocate(dummy_info!()).unwrap();
-            assert!(allocator.allocate(dummy_info!()).is_err());
-
-            drop(alloc);
-            let _alloc = allocator.allocate(dummy_info!()).unwrap();
-        }
-
-        let allocator = dummy_allocator(device, 2 * BLOCK_SIZE);
-        {
-            let alloc1 = allocator.allocate(dummy_info!()).unwrap();
-            let alloc2 = allocator.allocate(dummy_info!()).unwrap();
-            assert!(allocator.allocate(dummy_info!()).is_err());
-
-            drop(alloc1);
-            let alloc1 = allocator.allocate(dummy_info!()).unwrap();
-            assert!(allocator.allocate(dummy_info!()).is_err());
-
-            drop(alloc1);
-            drop(alloc2);
-            let _alloc1 = allocator.allocate(dummy_info!()).unwrap();
-            let _alloc2 = allocator.allocate(dummy_info!()).unwrap();
-        }
-    }
-
-    #[test]
-    fn pool_allocator_respects_alignment() {
-        const BLOCK_SIZE: DeviceSize = 1024 + 128;
-
-        let info_a = dummy_info!(BLOCK_SIZE, 256);
-        let info_b = dummy_info!(1024, 256);
-
-        let allocator = {
-            let (device, _) = gfx_dev_and_queue!();
-            let device_memory = DeviceMemory::allocate(
-                device,
-                MemoryAllocateInfo {
-                    allocation_size: 10 * BLOCK_SIZE,
-                    memory_type_index: 0,
-                    ..Default::default()
-                },
-            )
+        let alloc = allocator
+            .allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY)
             .unwrap();
+        assert!(allocator
+            .allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY)
+            .is_err());
+        assert!(allocator
+            .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY)
+            .is_err());
+        unsafe { allocator.deallocate(alloc) };
 
-            PoolAllocator::<BLOCK_SIZE>::new(
-                MemoryAlloc::new(device_memory),
-                DeviceAlignment::new(1).unwrap(),
-            )
-        };
-
-        // This uses the fact that block indices are inserted into the free-list in order, so
-        // the first allocation succeeds because the block has an even index, while the second
-        // has an odd index.
-        allocator.allocate(info_a.clone()).unwrap();
-        assert!(allocator.allocate(info_a.clone()).is_err());
-        allocator.allocate(info_a.clone()).unwrap();
-        assert!(allocator.allocate(info_a).is_err());
-
-        for _ in 0..10 {
-            allocator.allocate(info_b.clone()).unwrap();
+        for alloc in nonlinear_allocs.drain(..) {
+            unsafe { allocator.deallocate(alloc) };
         }
     }
 
-    #[test]
-    fn pool_allocator_respects_granularity() {
-        const BLOCK_SIZE: DeviceSize = 128;
-
-        fn dummy_allocator(
-            device: Arc<Device>,
-            allocation_type: AllocationType,
-        ) -> Arc<PoolAllocator<BLOCK_SIZE>> {
-            let device_memory = DeviceMemory::allocate(
-                device,
-                MemoryAllocateInfo {
-                    allocation_size: 1024,
-                    memory_type_index: 0,
-                    ..Default::default()
-                },
-            )
-            .unwrap();
-            let mut region = MemoryAlloc::new(device_memory);
-            unsafe { region.set_allocation_type(allocation_type) };
-
-            PoolAllocator::new(region, DeviceAlignment::new(256).unwrap())
-        }
-
-        let (device, _) = gfx_dev_and_queue!();
-
-        let allocator = dummy_allocator(device.clone(), AllocationType::Unknown);
-        assert!(allocator.block_count() == 4);
-
-        let allocator = dummy_allocator(device.clone(), AllocationType::Linear);
-        assert!(allocator.block_count() == 8);
-
-        let allocator = dummy_allocator(device, AllocationType::NonLinear);
-        assert!(allocator.block_count() == 8);
-    }
-
     #[test]
     fn buddy_allocator_capacity() {
         const MAX_ORDER: usize = 10;
         const REGION_SIZE: DeviceSize = BuddyAllocator::MIN_NODE_SIZE << MAX_ORDER;
 
-        let allocator = dummy_allocator!(BuddyAllocator, REGION_SIZE);
+        let allocator = BuddyAllocator::new(0, REGION_SIZE);
         let mut allocs = Vec::with_capacity(1 << MAX_ORDER);
 
         for order in 0..=MAX_ORDER {
-            let size = BuddyAllocator::MIN_NODE_SIZE << order;
+            let layout =
+                DeviceLayout::from_size_alignment(BuddyAllocator::MIN_NODE_SIZE << order, 1)
+                    .unwrap();
 
             for _ in 0..1 << (MAX_ORDER - order) {
-                allocs.push(allocator.allocate(dummy_info!(size)).unwrap());
+                allocs.push(
+                    allocator
+                        .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
+                        .unwrap(),
+                );
            }
 
-            assert!(allocator.allocate(dummy_info!()).is_err());
+            assert!(allocator
+                .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
+                .is_err());
             assert!(allocator.free_size() == 0);
-            allocs.clear();
+
+            for alloc in allocs.drain(..) {
+                unsafe { allocator.deallocate(alloc) };
+            }
         }
 
         let mut orders = (0..MAX_ORDER).collect::<Vec<_>>();
@@ -2772,14 +1488,29 @@
             orders.rotate_left(mid);
 
             for &order in &orders {
-                let size = BuddyAllocator::MIN_NODE_SIZE << order;
-                allocs.push(allocator.allocate(dummy_info!(size)).unwrap());
+                let layout =
+                    DeviceLayout::from_size_alignment(BuddyAllocator::MIN_NODE_SIZE << order, 1)
+                        .unwrap();
+
+                allocs.push(
+                    allocator
+                        .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
+                        .unwrap(),
+                );
            }
 
-            let _alloc = allocator.allocate(dummy_info!()).unwrap();
-            assert!(allocator.allocate(dummy_info!()).is_err());
+            let alloc = allocator
+                .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
+                .unwrap();
+            assert!(allocator
+                .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
+                .is_err());
             assert!(allocator.free_size() == 0);
-            allocs.clear();
+            unsafe { allocator.deallocate(alloc) };
+
+            for alloc in allocs.drain(..) {
+                unsafe { allocator.deallocate(alloc) };
+            }
         }
     }
 
@@ -2787,135 +1518,196 @@ fn buddy_allocator_respects_alignment() {
         const REGION_SIZE: DeviceSize = 4096;
 
-        let allocator = dummy_allocator!(BuddyAllocator, REGION_SIZE);
+        let allocator = BuddyAllocator::new(0, REGION_SIZE);
 
         {
-            let info = dummy_info!(1, 4096);
+            let layout = DeviceLayout::from_size_alignment(1, 4096).unwrap();
 
-            let _alloc = allocator.allocate(info.clone()).unwrap();
-            assert!(allocator.allocate(info).is_err());
+            let alloc = allocator
+                .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
+                .unwrap();
+            assert!(allocator
+                .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN)
+                .is_err());
             assert!(allocator.free_size() == REGION_SIZE - BuddyAllocator::MIN_NODE_SIZE);
+            unsafe { allocator.deallocate(alloc) };
        }
 
         {
-            let info_a = dummy_info!(1, 256);
-            let allocations_a = REGION_SIZE / info_a.layout.alignment().as_devicesize();
-            let info_b = dummy_info!(1, 16);
-            let allocations_b =
-                REGION_SIZE / info_b.layout.alignment().as_devicesize() - allocations_a;
+            let layout_a = DeviceLayout::from_size_alignment(1, 256).unwrap();
+            let allocations_a = REGION_SIZE / layout_a.alignment().as_devicesize();
+            let layout_b = DeviceLayout::from_size_alignment(1, 16).unwrap();
+            let allocations_b = REGION_SIZE / layout_b.alignment().as_devicesize() - allocations_a;
 
             let mut allocs =
                 Vec::with_capacity((REGION_SIZE / BuddyAllocator::MIN_NODE_SIZE) as usize);
 
             for _ in 0..allocations_a {
-                allocs.push(allocator.allocate(info_a.clone()).unwrap());
+                allocs.push(
+                    allocator
+                        .allocate(layout_a, AllocationType::Unknown, DeviceAlignment::MIN)
+                        .unwrap(),
+                );
            }
 
-            assert!(allocator.allocate(info_a).is_err());
+            assert!(allocator
+                .allocate(layout_a, AllocationType::Unknown, DeviceAlignment::MIN)
+                .is_err());
             assert!(
                 allocator.free_size() == REGION_SIZE - allocations_a * BuddyAllocator::MIN_NODE_SIZE
             );
 
             for _ in 0..allocations_b {
-                allocs.push(allocator.allocate(info_b.clone()).unwrap());
+                allocs.push(
+                    allocator
+                        .allocate(layout_b, AllocationType::Unknown, DeviceAlignment::MIN)
+                        .unwrap(),
+                );
            }
 
-            assert!(allocator.allocate(dummy_info!()).is_err());
+            assert!(allocator
+                .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
+                .is_err());
             assert!(allocator.free_size() == 0);
+
+            for alloc in allocs {
+                unsafe { allocator.deallocate(alloc) };
+            }
         }
     }
 
     #[test]
     fn buddy_allocator_respects_granularity() {
-        const GRANULARITY: DeviceSize = 256;
-        const REGION_SIZE: DeviceSize = 2 * GRANULARITY;
+        const GRANULARITY:
DeviceAlignment = unwrap(DeviceAlignment::new(256)); + const REGION_SIZE: DeviceSize = 2 * GRANULARITY.as_devicesize(); - let allocator = dummy_allocator!(BuddyAllocator, REGION_SIZE, GRANULARITY); + let allocator = BuddyAllocator::new(0, REGION_SIZE); { const ALLOCATIONS: DeviceSize = REGION_SIZE / BuddyAllocator::MIN_NODE_SIZE; let mut allocs = Vec::with_capacity(ALLOCATIONS as usize); + for _ in 0..ALLOCATIONS { - allocs.push(allocator.allocate(dummy_info_linear!()).unwrap()); + allocs.push( + allocator + .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY) + .unwrap(), + ); } - assert!(allocator.allocate(dummy_info_linear!()).is_err()); + assert!(allocator + .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY) + .is_err()); assert!(allocator.free_size() == 0); + + for alloc in allocs { + unsafe { allocator.deallocate(alloc) }; + } } { - let _alloc1 = allocator.allocate(dummy_info!()).unwrap(); - let _alloc2 = allocator.allocate(dummy_info!()).unwrap(); - assert!(allocator.allocate(dummy_info!()).is_err()); + let alloc1 = allocator + .allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY) + .unwrap(); + let alloc2 = allocator + .allocate(DUMMY_LAYOUT, AllocationType::Unknown, GRANULARITY) + .unwrap(); + assert!(allocator + .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY) + .is_err()); assert!(allocator.free_size() == 0); + unsafe { allocator.deallocate(alloc1) }; + unsafe { allocator.deallocate(alloc2) }; } } #[test] fn bump_allocator_respects_alignment() { const ALIGNMENT: DeviceSize = 16; + const REGION_SIZE: DeviceSize = 10 * ALIGNMENT; - let info = dummy_info!(1, ALIGNMENT); - let allocator = dummy_allocator!(BumpAllocator, ALIGNMENT * 10); + let layout = DeviceLayout::from_size_alignment(1, ALIGNMENT).unwrap(); + let mut allocator = BumpAllocator::new(0, REGION_SIZE); for _ in 0..10 { - allocator.allocate(info.clone()).unwrap(); + allocator + .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN) + .unwrap(); } - assert!(allocator.allocate(info.clone()).is_err()); + assert!(allocator + .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN) + .is_err()); for _ in 0..ALIGNMENT - 1 { - allocator.allocate(dummy_info!()).unwrap(); + allocator + .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN) + .unwrap(); } - assert!(allocator.allocate(info).is_err()); + assert!(allocator + .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN) + .is_err()); assert!(allocator.free_size() == 0); + + allocator.reset(); + assert!(allocator.free_size() == REGION_SIZE); } #[test] fn bump_allocator_respects_granularity() { const ALLOCATIONS: DeviceSize = 10; - const GRANULARITY: DeviceSize = 1024; + const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(1024)); + const REGION_SIZE: DeviceSize = ALLOCATIONS * GRANULARITY.as_devicesize(); - let mut allocator = dummy_allocator!(BumpAllocator, GRANULARITY * ALLOCATIONS, GRANULARITY); + let mut allocator = BumpAllocator::new(0, REGION_SIZE); for i in 0..ALLOCATIONS { - for _ in 0..GRANULARITY { + for _ in 0..GRANULARITY.as_devicesize() { allocator - .allocate(SuballocationCreateInfo { - allocation_type: if i % 2 == 0 { + .allocate( + DUMMY_LAYOUT, + if i % 2 == 0 { AllocationType::NonLinear } else { AllocationType::Linear }, - ..dummy_info!() - }) + GRANULARITY, + ) .unwrap(); } } - assert!(allocator.allocate(dummy_info_linear!()).is_err()); + assert!(allocator + .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY) + .is_err()); assert!(allocator.free_size() == 
0); - allocator.try_reset().unwrap(); + allocator.reset(); for i in 0..ALLOCATIONS { allocator - .allocate(SuballocationCreateInfo { - allocation_type: if i % 2 == 0 { + .allocate( + DUMMY_LAYOUT, + if i % 2 == 0 { AllocationType::Linear } else { AllocationType::NonLinear }, - ..dummy_info!() - }) + GRANULARITY, + ) .unwrap(); } - assert!(allocator.allocate(dummy_info_linear!()).is_err()); - assert!(allocator.free_size() == GRANULARITY - 1); + assert!(allocator + .allocate(DUMMY_LAYOUT, AllocationType::Linear, GRANULARITY) + .is_err()); + assert!(allocator.free_size() == GRANULARITY.as_devicesize() - 1); + + allocator.reset(); + assert!(allocator.free_size() == REGION_SIZE); } #[test] @@ -2926,90 +1718,30 @@ mod tests { const REGION_SIZE: DeviceSize = (ALLOCATION_STEP * (THREADS + 1) * THREADS / 2) * ALLOCATIONS_PER_THREAD; - let mut allocator = dummy_allocator!(BumpAllocator, REGION_SIZE); + let mut allocator = BumpAllocator::new(0, REGION_SIZE); thread::scope(|scope| { for i in 1..=THREADS { let allocator = &allocator; scope.spawn(move || { - let info = dummy_info!(i * ALLOCATION_STEP); + let layout = DeviceLayout::from_size_alignment(i * ALLOCATION_STEP, 1).unwrap(); for _ in 0..ALLOCATIONS_PER_THREAD { - allocator.allocate(info.clone()).unwrap(); + allocator + .allocate(layout, AllocationType::Unknown, DeviceAlignment::MIN) + .unwrap(); } }); } }); - assert!(allocator.allocate(dummy_info!()).is_err()); + assert!(allocator + .allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN) + .is_err()); assert!(allocator.free_size() == 0); - allocator.try_reset().unwrap(); + allocator.reset(); assert!(allocator.free_size() == REGION_SIZE); } - - macro_rules! dummy_allocator { - ($type:ty, $size:expr) => { - dummy_allocator!($type, $size, 1) - }; - ($type:ty, $size:expr, $granularity:expr) => {{ - let (device, _) = gfx_dev_and_queue!(); - let device_memory = DeviceMemory::allocate( - device, - MemoryAllocateInfo { - allocation_size: $size, - memory_type_index: 0, - ..Default::default() - }, - ) - .unwrap(); - let mut allocator = <$type>::new(MemoryAlloc::new(device_memory)); - Arc::get_mut(&mut allocator) - .unwrap() - .buffer_image_granularity = DeviceAlignment::new($granularity).unwrap(); - - allocator - }}; - } - - macro_rules! dummy_info { - () => { - dummy_info!(1) - }; - ($size:expr) => { - dummy_info!($size, 1) - }; - ($size:expr, $alignment:expr) => { - SuballocationCreateInfo { - layout: DeviceLayout::new( - NonZeroDeviceSize::new($size).unwrap(), - DeviceAlignment::new($alignment).unwrap(), - ) - .unwrap(), - allocation_type: AllocationType::Unknown, - ..Default::default() - } - }; - } - - macro_rules! dummy_info_linear { - ($($args:tt)*) => { - SuballocationCreateInfo { - allocation_type: AllocationType::Linear, - ..dummy_info!($($args)*) - } - }; - } - - macro_rules! 
dummy_info_nonlinear { - ($($args:tt)*) => { - SuballocationCreateInfo { - allocation_type: AllocationType::NonLinear, - ..dummy_info!($($args)*) - } - }; - } - - use {dummy_allocator, dummy_info, dummy_info_linear, dummy_info_nonlinear}; } diff --git a/vulkano/src/memory/device_memory.rs b/vulkano/src/memory/device_memory.rs index 5afa1b36..a1cc125f 100644 --- a/vulkano/src/memory/device_memory.rs +++ b/vulkano/src/memory/device_memory.rs @@ -12,7 +12,7 @@ use crate::{ device::{Device, DeviceOwned}, instance::InstanceOwnedDebugWrapper, macros::{impl_id_counter, vulkan_bitflags, vulkan_bitflags_enum}, - memory::{is_aligned, MemoryPropertyFlags}, + memory::{allocator::DeviceLayout, is_aligned, MemoryPropertyFlags}, DeviceSize, Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version, VulkanError, VulkanObject, }; @@ -284,6 +284,9 @@ impl DeviceMemory { output.assume_init() }; + // Sanity check: this would lead to UB when suballocating. + assert!(allocation_size <= DeviceLayout::MAX_SIZE); + let atom_size = device.physical_device().properties().non_coherent_atom_size; let is_coherent = device.physical_device().memory_properties().memory_types @@ -330,6 +333,9 @@ impl DeviceMemory { _ne: _, } = allocate_info; + // Sanity check: this would lead to UB when suballocating. + assert!(allocation_size <= DeviceLayout::MAX_SIZE); + let atom_size = device.physical_device().properties().non_coherent_atom_size; let is_coherent = device.physical_device().memory_properties().memory_types @@ -407,6 +413,10 @@ impl DeviceMemory { self.atom_size } + pub(crate) fn is_coherent(&self) -> bool { + self.is_coherent + } + /// Maps a range of memory to be accessed by the host. /// /// `self` must not be host-mapped already and must be allocated from host-visible memory. diff --git a/vulkano/src/memory/mod.rs b/vulkano/src/memory/mod.rs index 8603676d..c7d624b9 100644 --- a/vulkano/src/memory/mod.rs +++ b/vulkano/src/memory/mod.rs @@ -91,18 +91,25 @@ //! get memory from that pool. By default if you don't specify any pool when creating a buffer or //! an image, an instance of `StandardMemoryPool` that is shared by the `Device` object is used. -use self::allocator::DeviceLayout; +use self::allocator::{ + align_up, AllocationHandle, AllocationType, DeviceLayout, MemoryAlloc, MemoryAllocator, + Suballocation, +}; pub use self::{alignment::*, device_memory::*}; use crate::{ buffer::{sys::RawBuffer, Subbuffer}, + device::{Device, DeviceOwned, DeviceOwnedDebugWrapper}, image::{sys::RawImage, Image, ImageAspects}, macros::vulkan_bitflags, - sync::semaphore::Semaphore, - DeviceSize, + sync::{semaphore::Semaphore, HostAccessError}, + DeviceSize, Validated, ValidationError, VulkanError, }; use std::{ + cmp, + mem::ManuallyDrop, num::NonZeroU64, ops::{Bound, Range, RangeBounds, RangeTo}, + ptr::{self, NonNull}, sync::Arc, }; @@ -110,6 +117,332 @@ mod alignment; pub mod allocator; mod device_memory; +/// Memory that can be bound to resources. +/// +/// Most commonly you will want to obtain this by first using a [memory allocator] and then +/// [constructing this object from its allocation]. Alternatively, if you want to bind a whole +/// block of `DeviceMemory` to a resource, or can't go through an allocator, you can use [the +/// dedicated constructor]. 
+///
+/// [memory allocator]: allocator::MemoryAllocator
+/// [constructing this object from its allocation]: Self::from_allocation
+/// [the dedicated constructor]: Self::new_dedicated
+#[derive(Debug)]
+pub struct ResourceMemory {
+    device_memory: ManuallyDrop<DeviceOwnedDebugWrapper<Arc<DeviceMemory>>>,
+    offset: DeviceSize,
+    size: DeviceSize,
+    allocation_type: AllocationType,
+    allocation_handle: AllocationHandle,
+    suballocation_handle: Option<AllocationHandle>,
+    allocator: Option<Arc<dyn MemoryAllocator>>,
+}
+
+impl ResourceMemory {
+    /// Creates a new `ResourceMemory` that has a whole device memory block dedicated to it. You
+    /// may use this when you obtain the memory in a way other than through the use of a memory
+    /// allocator, for instance by importing memory.
+    ///
+    /// This is safe because we take ownership of the device memory, so that there can be no
+    /// aliasing resources. On the other hand, the device memory can never be reused: it will be
+    /// freed once the returned object is dropped.
+    pub fn new_dedicated(device_memory: DeviceMemory) -> Self {
+        unsafe { Self::new_dedicated_unchecked(Arc::new(device_memory)) }
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    pub unsafe fn new_dedicated_unchecked(device_memory: Arc<DeviceMemory>) -> Self {
+        ResourceMemory {
+            offset: 0,
+            size: device_memory.allocation_size(),
+            allocation_type: AllocationType::Unknown,
+            allocation_handle: AllocationHandle(ptr::null_mut()),
+            suballocation_handle: None,
+            allocator: None,
+            device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(device_memory)),
+        }
+    }
+
+    /// Creates a new `ResourceMemory` from an allocation of a memory allocator.
+    ///
+    /// Ownership of `allocation` is semantically transferred to this object, and it is
+    /// deallocated when the returned object is dropped.
+    ///
+    /// # Safety
+    ///
+    /// - `allocation` must refer to a **currently allocated** allocation of `allocator`.
+    /// - `allocation` must never be deallocated.
+    #[inline]
+    pub unsafe fn from_allocation(
+        allocator: Arc<dyn MemoryAllocator>,
+        allocation: MemoryAlloc,
+    ) -> Self {
+        if let Some(suballocation) = allocation.suballocation {
+            ResourceMemory {
+                offset: suballocation.offset,
+                size: suballocation.size,
+                allocation_type: allocation.allocation_type,
+                allocation_handle: allocation.allocation_handle,
+                suballocation_handle: Some(suballocation.handle),
+                allocator: Some(allocator),
+                device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(allocation.device_memory)),
+            }
+        } else {
+            ResourceMemory {
+                offset: 0,
+                size: allocation.device_memory.allocation_size(),
+                allocation_type: allocation.allocation_type,
+                allocation_handle: allocation.allocation_handle,
+                suballocation_handle: None,
+                allocator: Some(allocator),
+                device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(allocation.device_memory)),
+            }
+        }
+    }
+
+    /// Returns the underlying block of [`DeviceMemory`].
+    #[inline]
+    pub fn device_memory(&self) -> &Arc<DeviceMemory> {
+        &self.device_memory
+    }
+
+    /// Returns the offset (in bytes) within the [`DeviceMemory`] block where this `ResourceMemory`
+    /// begins.
+    ///
+    /// If this `ResourceMemory` is not a [suballocation], then this will be `0`.
+    ///
+    /// [suballocation]: Suballocation
+    #[inline]
+    pub fn offset(&self) -> DeviceSize {
+        self.offset
+    }
+
+    /// Returns the size (in bytes) of the `ResourceMemory`.
+    ///
+    /// If this `ResourceMemory` is not a [suballocation], then this will be equal to the
+    /// [allocation size] of the [`DeviceMemory`] block.
+    ///
+    /// [suballocation]: Suballocation
+    #[inline]
+    pub fn size(&self) -> DeviceSize {
+        self.size
+    }
+
+    /// Returns the type of resources that can be bound to this `ResourceMemory`.
+    ///
+    /// If this `ResourceMemory` is not a [suballocation], then this will be
+    /// [`AllocationType::Unknown`].
+    ///
+    /// [suballocation]: Suballocation
+    #[inline]
+    pub fn allocation_type(&self) -> AllocationType {
+        self.allocation_type
+    }
+
+    fn suballocation(&self) -> Option<Suballocation> {
+        self.suballocation_handle.map(|handle| Suballocation {
+            offset: self.offset,
+            size: self.size,
+            handle,
+        })
+    }
+
+    /// Returns the mapped pointer to a range of the `ResourceMemory`, or returns [`None`] if out
+    /// of bounds.
+    ///
+    /// `range` is specified in bytes relative to the beginning of `self` and must fall within the
+    /// range of the memory mapping given to [`DeviceMemory::map`].
+    ///
+    /// See [`MappingState::slice`] for the safety invariants of the returned pointer.
+    ///
+    /// [`MappingState::slice`]: crate::memory::MappingState::slice
+    #[inline]
+    pub fn mapped_slice(
+        &self,
+        range: impl RangeBounds<DeviceSize>,
+    ) -> Option<Result<NonNull<[u8]>, HostAccessError>> {
+        let mut range = self::range(range, ..self.size())?;
+        range.start += self.offset();
+        range.end += self.offset();
+
+        let res = if let Some(state) = self.device_memory().mapping_state() {
+            state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
+        } else {
+            Err(HostAccessError::NotHostMapped)
+        };
+
+        Some(res)
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn mapped_slice_unchecked(
+        &self,
+        range: impl RangeBounds<DeviceSize>,
+    ) -> Result<NonNull<[u8]>, HostAccessError> {
+        let mut range = self::range_unchecked(range, ..self.size());
+        range.start += self.offset();
+        range.end += self.offset();
+
+        if let Some(state) = self.device_memory().mapping_state() {
+            state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
+        } else {
+            Err(HostAccessError::NotHostMapped)
+        }
+    }
+
+    pub(crate) fn atom_size(&self) -> Option<DeviceAlignment> {
+        let memory = self.device_memory();
+
+        (!memory.is_coherent()).then_some(memory.atom_size())
+    }
+
+    /// Invalidates the host cache for a range of the `ResourceMemory`.
+    ///
+    /// If the device memory is not [host-coherent], you must call this function before the memory
+    /// is read by the host, if the device previously wrote to the memory. It has no effect if the
+    /// memory is host-coherent.
+    ///
+    /// # Safety
+    ///
+    /// - If there are memory writes by the device that have not been propagated into the host
+    ///   cache, then there must not be any references in Rust code to any portion of the
+    ///   specified `memory_range`.
+    ///
+    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    #[inline]
+    pub unsafe fn invalidate_range(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), Validated<VulkanError>> {
+        self.validate_memory_range(&memory_range)?;
+
+        self.device_memory()
+            .invalidate_range(self.create_memory_range(memory_range))
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn invalidate_range_unchecked(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), VulkanError> {
+        self.device_memory()
+            .invalidate_range_unchecked(self.create_memory_range(memory_range))
+    }
+
+    /// Flushes the host cache for a range of the `ResourceMemory`.
+    ///
+    /// If the device memory is not [host-coherent], you must call this function after writing to
+    /// the memory, if the device is going to read the memory. It has no effect if the memory is
+    /// host-coherent.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no operations pending or executing in a device queue that access any
+    ///   portion of the specified `memory_range`.
+    ///
+    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    #[inline]
+    pub unsafe fn flush_range(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), Validated<VulkanError>> {
+        self.validate_memory_range(&memory_range)?;
+
+        self.device_memory()
+            .flush_range(self.create_memory_range(memory_range))
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn flush_range_unchecked(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), VulkanError> {
+        self.device_memory()
+            .flush_range_unchecked(self.create_memory_range(memory_range))
+    }
+
+    fn validate_memory_range(
+        &self,
+        memory_range: &MappedMemoryRange,
+    ) -> Result<(), Box<ValidationError>> {
+        let &MappedMemoryRange {
+            offset,
+            size,
+            _ne: _,
+        } = memory_range;
+
+        if !(offset <= self.size() && size <= self.size() - offset) {
+            return Err(Box::new(ValidationError {
+                context: "memory_range".into(),
+                problem: "is not contained within the allocation".into(),
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
+    }
+
+    fn create_memory_range(&self, memory_range: MappedMemoryRange) -> MappedMemoryRange {
+        let MappedMemoryRange {
+            mut offset,
+            mut size,
+            _ne: _,
+        } = memory_range;
+
+        let memory = self.device_memory();
+
+        offset += self.offset();
+
+        // VUID-VkMappedMemoryRange-size-01390
+        if memory_range.offset + size == self.size() {
+            // We can align the end of the range like this without aliasing other allocations,
+            // because the memory allocator must ensure that all allocations are aligned to the
+            // atom size for non-host-coherent host-visible memory.
+            let end = cmp::min(
+                align_up(offset + size, memory.atom_size()),
+                memory.allocation_size(),
+            );
+            size = end - offset;
+        }
+
+        MappedMemoryRange {
+            offset,
+            size,
+            _ne: crate::NonExhaustive(()),
+        }
+    }
+}
+
+impl Drop for ResourceMemory {
+    #[inline]
+    fn drop(&mut self) {
+        let device_memory = unsafe { ManuallyDrop::take(&mut self.device_memory) }.0;
+
+        if let Some(allocator) = &self.allocator {
+            let allocation = MemoryAlloc {
+                device_memory,
+                suballocation: self.suballocation(),
+                allocation_type: self.allocation_type,
+                allocation_handle: self.allocation_handle,
+            };
+
+            // SAFETY: Enforced by the safety contract of [`ResourceMemory::from_allocation`].
+            unsafe { allocator.deallocate(allocation) };
+        }
+    }
+}
+
+unsafe impl DeviceOwned for ResourceMemory {
+    #[inline]
+    fn device(&self) -> &Arc<Device> {
+        self.device_memory().device()
+    }
+}
+
 /// Properties of the memory in a physical device.
#[derive(Clone, Debug)] #[non_exhaustive] diff --git a/vulkano/src/pipeline/compute.rs b/vulkano/src/pipeline/compute.rs index dce3aa71..5b58e0d4 100644 --- a/vulkano/src/pipeline/compute.rs +++ b/vulkano/src/pipeline/compute.rs @@ -468,6 +468,7 @@ mod tests { shader::{ShaderModule, ShaderModuleCreateInfo}, sync::{now, GpuFuture}, }; + use std::sync::Arc; // TODO: test for basic creation // TODO: test for pipeline layout error @@ -531,9 +532,9 @@ mod tests { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let data_buffer = Buffer::from_data( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() @@ -662,9 +663,9 @@ mod tests { .unwrap() }; - let memory_allocator = StandardMemoryAllocator::new_default(device.clone()); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone())); let data_buffer = Buffer::from_data( - &memory_allocator, + memory_allocator, BufferCreateInfo { usage: BufferUsage::STORAGE_BUFFER, ..Default::default() diff --git a/vulkano/src/render_pass/framebuffer.rs b/vulkano/src/render_pass/framebuffer.rs index 9364a0d8..a78a1d2b 100644 --- a/vulkano/src/render_pass/framebuffer.rs +++ b/vulkano/src/render_pass/framebuffer.rs @@ -665,6 +665,7 @@ mod tests { SubpassDescription, }, }; + use std::sync::Arc; #[test] fn simple_create() { @@ -687,10 +688,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let view = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -768,10 +769,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let view = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8_UNORM, @@ -818,10 +819,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let view = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -868,10 +869,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let view = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -924,10 +925,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let a = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -942,7 +943,7 @@ mod tests { .unwrap(); let b = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -998,10 +999,10 @@ 
mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let view = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -1046,10 +1047,10 @@ mod tests { ) .unwrap(); - let memory_allocator = StandardMemoryAllocator::new_default(device); + let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device)); let a = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator.clone(), ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM, @@ -1064,7 +1065,7 @@ mod tests { .unwrap(); let b = ImageView::new_default( Image::new( - &memory_allocator, + memory_allocator, ImageCreateInfo { image_type: ImageType::Dim2d, format: Format::R8G8B8A8_UNORM,
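The one calling convention this patch migrates every call site to can be summarized in a single sketch. The following is illustrative only, not part of the patch: it assumes the post-patch vulkano API shown in the diffs above, and `create_vertex_buffer`, `device`, and the buffer contents are made up for the example.

use std::sync::Arc;
use vulkano::{
    buffer::{Buffer, BufferCreateInfo, BufferUsage},
    device::Device,
    memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
};

fn create_vertex_buffer(device: Arc<Device>) {
    // Create the allocator once and wrap it in an `Arc`, instead of passing
    // `&memory_allocator` by reference as before this patch.
    let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));

    let _buffer = Buffer::from_iter(
        // Hand each resource a clone of the `Arc`, so the resource keeps the allocator
        // alive and can deallocate its memory manually when it is dropped.
        memory_allocator.clone(),
        BufferCreateInfo {
            usage: BufferUsage::VERTEX_BUFFER,
            ..Default::default()
        },
        AllocationCreateInfo {
            memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
                | MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
            ..Default::default()
        },
        [0u32; 3],
    )
    .unwrap();
}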