Memory (sub)allocation API 2.0 (#2316)

* Excommunicate `PoolAllocator`

* Switch to manual deallocation

* Move `ResourceMemory` away from the `suballocator` module

* Remove `SuballocatorError::BlockSizeExceeded`

* Fix examples

* Fix atom size

* Address safety TODOs

* Nice English you got there bro
This commit is contained in:
marc0246 2023-09-03 13:09:07 +02:00 committed by GitHub
parent fe7b2371a9
commit 5578bf30da
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
55 changed files with 1418 additions and 2200 deletions

View File

@ -274,7 +274,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -293,7 +293,7 @@ fn main() {
let uniform_buffers = (0..swapchain.image_count())
.map(|_| {
Buffer::new_sized(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::UNIFORM_BUFFER,
..Default::default()
@ -312,7 +312,7 @@ fn main() {
// is used exclusively for writing, swapping the two after each update.
let textures = [(); 2].map(|_| {
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -714,7 +714,7 @@ fn run_worker(
// out-of-date texture is the current up-to-date texture and vice-versa, cycle repeating.
let staging_buffers = [(); 2].map(|_| {
Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()

View File

@ -13,6 +13,7 @@
// been more and more used for general-purpose operations as well. This is called "General-Purpose
// GPU", or *GPGPU*. This is what this example demonstrates.
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -158,14 +159,14 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
// We start by creating the buffer that will store the data.
let data_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -19,7 +19,7 @@ use vulkano::{
},
device::Queue,
image::view::ImageView,
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState},
@ -54,7 +54,7 @@ impl AmbientLightingSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
) -> AmbientLightingSystem {

View File

@ -20,7 +20,7 @@ use vulkano::{
},
device::Queue,
image::view::ImageView,
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState},
@ -55,7 +55,7 @@ impl DirectionalLightingSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
) -> DirectionalLightingSystem {

View File

@ -20,7 +20,7 @@ use vulkano::{
},
device::Queue,
image::view::ImageView,
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::{AttachmentBlend, BlendFactor, BlendOp, ColorBlendState},
@ -54,7 +54,7 @@ impl PointLightingSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
) -> PointLightingSystem {

View File

@ -155,7 +155,7 @@ impl FrameSystem {
// will be replaced the first time we call `frame()`.
let diffuse_buffer = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::A2B10G10R10_UNORM_PACK32,
@ -172,7 +172,7 @@ impl FrameSystem {
.unwrap();
let normals_buffer = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R16G16B16A16_SFLOAT,
@ -187,7 +187,7 @@ impl FrameSystem {
.unwrap();
let depth_buffer = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::D16_UNORM,
@ -211,21 +211,21 @@ impl FrameSystem {
let ambient_lighting_system = AmbientLightingSystem::new(
gfx_queue.clone(),
lighting_subpass.clone(),
&memory_allocator,
memory_allocator.clone(),
command_buffer_allocator.clone(),
descriptor_set_allocator.clone(),
);
let directional_lighting_system = DirectionalLightingSystem::new(
gfx_queue.clone(),
lighting_subpass.clone(),
&memory_allocator,
memory_allocator.clone(),
command_buffer_allocator.clone(),
descriptor_set_allocator.clone(),
);
let point_lighting_system = PointLightingSystem::new(
gfx_queue.clone(),
lighting_subpass,
&memory_allocator,
memory_allocator.clone(),
command_buffer_allocator.clone(),
descriptor_set_allocator,
);
@ -281,7 +281,7 @@ impl FrameSystem {
// render pass their content becomes undefined.
self.diffuse_buffer = ImageView::new_default(
Image::new(
&self.memory_allocator,
self.memory_allocator.clone(),
ImageCreateInfo {
extent,
format: Format::A2B10G10R10_UNORM_PACK32,
@ -297,7 +297,7 @@ impl FrameSystem {
.unwrap();
self.normals_buffer = ImageView::new_default(
Image::new(
&self.memory_allocator,
self.memory_allocator.clone(),
ImageCreateInfo {
extent,
format: Format::R16G16B16A16_SFLOAT,
@ -313,7 +313,7 @@ impl FrameSystem {
.unwrap();
self.depth_buffer = ImageView::new_default(
Image::new(
&self.memory_allocator,
self.memory_allocator.clone(),
ImageCreateInfo {
extent,
format: Format::D16_UNORM,

View File

@ -174,7 +174,7 @@ fn main() {
let triangle_draw_system = TriangleDrawSystem::new(
queue.clone(),
frame_system.deferred_subpass(),
&memory_allocator,
memory_allocator.clone(),
command_buffer_allocator,
);

View File

@ -46,7 +46,7 @@ impl TriangleDrawSystem {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &StandardMemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
) -> TriangleDrawSystem {
let vertices = [

View File

@ -13,7 +13,7 @@
// Each draw or dispatch call can specify an offset into the buffer to read object data from,
// without having to rebind descriptor sets.
use std::{iter::repeat, mem::size_of};
use std::{iter::repeat, mem::size_of, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -153,7 +153,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
@ -188,7 +188,7 @@ fn main() {
};
let input_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::UNIFORM_BUFFER,
..Default::default()
@ -203,7 +203,7 @@ fn main() {
.unwrap();
let output_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -13,7 +13,7 @@
// Workgroup parallelism capabilities vary between GPUs and setting them properly is important to
// achieve the maximal performance that particular device can provide.
use std::{fs::File, io::BufWriter, path::Path};
use std::{fs::File, io::BufWriter, path::Path, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -209,13 +209,13 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let image = Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -238,7 +238,7 @@ fn main() {
.unwrap();
let buf = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_DST,
..Default::default()

View File

@ -41,11 +41,10 @@ mod linux {
},
memory::{
allocator::{
AllocationCreateInfo, MemoryAlloc, MemoryAllocator, MemoryTypeFilter,
StandardMemoryAllocator,
AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter, StandardMemoryAllocator,
},
DedicatedAllocation, DeviceMemory, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
MemoryAllocateInfo,
MemoryAllocateInfo, ResourceMemory,
},
pipeline::{
graphics::{
@ -159,7 +158,7 @@ mod linux {
let image = Arc::new(
raw_image
.bind_memory([MemoryAlloc::new(image_memory)])
.bind_memory([ResourceMemory::new_dedicated(image_memory)])
.map_err(|(err, _, _)| err)
.unwrap(),
);
@ -464,7 +463,7 @@ mod linux {
Vec<Arc<Framebuffer>>,
Arc<Sampler>,
Arc<GraphicsPipeline>,
StandardMemoryAllocator,
Arc<StandardMemoryAllocator>,
Subbuffer<[MyVertex]>,
) {
let library = VulkanLibrary::new().unwrap();
@ -600,7 +599,7 @@ mod linux {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertices = [
MyVertex {
@ -617,7 +616,7 @@ mod linux {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -156,7 +156,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -180,7 +180,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -229,7 +229,7 @@ fn main() {
let extent = [info.width * 2, info.height * 2, 1];
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -248,7 +248,7 @@ fn main() {
.unwrap();
let image = Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
format: Format::R8G8B8A8_UNORM,
extent,

View File

@ -155,7 +155,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -179,7 +179,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -228,7 +228,7 @@ fn main() {
let extent = [info.width, info.height, 1];
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -247,7 +247,7 @@ fn main() {
.unwrap();
let image = Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_SRGB,

View File

@ -161,7 +161,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -185,7 +185,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -234,7 +234,7 @@ fn main() {
let extent = [info.width, info.height, 1];
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -253,7 +253,7 @@ fn main() {
.unwrap();
let image = Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_SRGB,

View File

@ -168,7 +168,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
// We now create a buffer that will store the shape of our triangle. This triangle is identical
// to the one in the `triangle.rs` example.
@ -184,7 +184,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -222,7 +222,7 @@ fn main() {
data
};
let instance_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -81,7 +81,7 @@ impl FractalApp {
),
place_over_frame: RenderPassPlaceOverFrame::new(
gfx_queue,
&memory_allocator,
memory_allocator.clone(),
command_buffer_allocator,
descriptor_set_allocator,
image_format,

View File

@ -59,7 +59,7 @@ impl FractalComputePipeline {
];
let palette_size = colors.len() as i32;
let palette = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()
@ -119,7 +119,7 @@ impl FractalComputePipeline {
colors.push([r, g, b, a]);
}
self.palette = Buffer::from_iter(
&self.memory_allocator,
self.memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -22,7 +22,7 @@ use vulkano::{
sampler::{Filter, Sampler, SamplerAddressMode, SamplerCreateInfo, SamplerMipmapMode},
view::ImageView,
},
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::ColorBlendState,
@ -89,13 +89,13 @@ impl PixelsDrawPipeline {
pub fn new(
gfx_queue: Arc<Queue>,
subpass: Subpass,
memory_allocator: &impl MemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
) -> PixelsDrawPipeline {
let (vertices, indices) = textured_quad(2.0, 2.0);
let vertex_buffer = Buffer::from_iter(
memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -18,7 +18,7 @@ use vulkano::{
device::Queue,
format::Format,
image::view::ImageView,
memory::allocator::MemoryAllocator,
memory::allocator::StandardMemoryAllocator,
render_pass::{Framebuffer, FramebufferCreateInfo, RenderPass, Subpass},
sync::GpuFuture,
};
@ -34,7 +34,7 @@ pub struct RenderPassPlaceOverFrame {
impl RenderPassPlaceOverFrame {
pub fn new(
gfx_queue: Arc<Queue>,
memory_allocator: &impl MemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
descriptor_set_allocator: Arc<StandardDescriptorSetAllocator>,
output_format: Format,

View File

@ -62,7 +62,7 @@
// non-multisampled image. This operation is not a regular blit (blitting a multisampled image is
// an error), instead it is called *resolving* the image.
use std::{fs::File, io::BufWriter, path::Path};
use std::{fs::File, io::BufWriter, path::Path, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -150,7 +150,7 @@ fn main() {
.unwrap();
let queue = queues.next().unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
// Creating our intermediate multisampled image.
//
@ -158,7 +158,7 @@ fn main() {
// image. But we also pass the number of samples-per-pixel, which is 4 here.
let intermediary = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -175,7 +175,7 @@ fn main() {
// This is the final image that will receive the anti-aliased triangle.
let image = Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -299,7 +299,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -368,7 +368,7 @@ fn main() {
let command_buffer_allocator = StandardCommandBufferAllocator::new(device, Default::default());
let buf = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_DST,
..Default::default()

View File

@ -177,7 +177,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -198,7 +198,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -23,7 +23,7 @@ use vulkano::{
device::Queue,
format::Format,
image::{view::ImageView, Image, ImageCreateInfo, ImageType, ImageUsage},
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
compute::ComputePipelineCreateInfo, layout::PipelineDescriptorSetLayoutCreateInfo,
ComputePipeline, Pipeline, PipelineBindPoint, PipelineLayout,
@ -46,7 +46,7 @@ pub struct GameOfLifeComputePipeline {
image: Arc<ImageView>,
}
fn rand_grid(memory_allocator: &impl MemoryAllocator, size: [u32; 2]) -> Subbuffer<[u32]> {
fn rand_grid(memory_allocator: Arc<StandardMemoryAllocator>, size: [u32; 2]) -> Subbuffer<[u32]> {
Buffer::from_iter(
memory_allocator,
BufferCreateInfo {
@ -66,8 +66,8 @@ fn rand_grid(memory_allocator: &impl MemoryAllocator, size: [u32; 2]) -> Subbuff
impl GameOfLifeComputePipeline {
pub fn new(app: &App, compute_queue: Arc<Queue>, size: [u32; 2]) -> GameOfLifeComputePipeline {
let memory_allocator = app.context.memory_allocator();
let life_in = rand_grid(memory_allocator, size);
let life_out = rand_grid(memory_allocator, size);
let life_in = rand_grid(memory_allocator.clone(), size);
let life_out = rand_grid(memory_allocator.clone(), size);
let compute_life_pipeline = {
let device = compute_queue.device();
@ -94,7 +94,7 @@ impl GameOfLifeComputePipeline {
let image = ImageView::new_default(
Image::new(
memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,

View File

@ -91,7 +91,7 @@ impl PixelsDrawPipeline {
let (vertices, indices) = textured_quad(2.0, 2.0);
let memory_allocator = app.context.memory_allocator();
let vertex_buffer = Buffer::from_iter(
memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -105,7 +105,7 @@ impl PixelsDrawPipeline {
)
.unwrap();
let index_buffer = Buffer::from_iter(
memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()

View File

@ -12,7 +12,7 @@
// multiple perspectives or cameras are very similar like in virtual reality or other types of
// stereoscopic rendering where the left and right eye only differ in a small position offset.
use std::{fs::File, io::BufWriter, path::Path};
use std::{fs::File, io::BufWriter, path::Path, sync::Arc};
use vulkano::{
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
@ -134,10 +134,10 @@ fn main() {
let queue = queues.next().unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let image = Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::B8G8R8A8_SRGB,
@ -171,7 +171,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -313,7 +313,7 @@ fn main() {
let create_buffer = || {
Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_DST,
..Default::default()

View File

@ -150,7 +150,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -206,7 +206,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -360,7 +360,7 @@ fn main() {
&images,
render_pass.clone(),
&mut viewport,
&memory_allocator,
memory_allocator.clone(),
);
let mut recreate_swapchain = false;
@ -401,7 +401,7 @@ fn main() {
&new_images,
render_pass.clone(),
&mut viewport,
&memory_allocator,
memory_allocator.clone(),
);
recreate_swapchain = false;
}
@ -565,7 +565,7 @@ fn window_size_dependent_setup(
images: &[Arc<Image>],
render_pass: Arc<RenderPass>,
viewport: &mut Viewport,
memory_allocator: &StandardMemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
) -> Vec<Arc<Framebuffer>> {
let extent = images[0].extent();
viewport.extent = [extent[0] as f32, extent[1] as f32];

View File

@ -12,6 +12,7 @@
// modifying and binding descriptor sets for each update. As a result, they are expected to
// outperform such memory-backed resource updates.
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -140,13 +141,13 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let data_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -151,7 +151,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -175,7 +175,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -223,7 +223,7 @@ fn main() {
let extent = [info.width, info.height, 1];
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -242,7 +242,7 @@ fn main() {
.unwrap();
let image = Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_SRGB,

View File

@ -236,7 +236,7 @@ fn main() {
let mut recreate_swapchain = false;
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -262,7 +262,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -163,7 +163,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -239,7 +239,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -289,7 +289,7 @@ fn main() {
let extent = [info.width, info.height, 1];
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -308,7 +308,7 @@ fn main() {
.unwrap();
let image = Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_SRGB,
@ -338,7 +338,7 @@ fn main() {
let extent = [info.width, info.height, 1];
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -357,7 +357,7 @@ fn main() {
.unwrap();
let image = Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_SRGB,

View File

@ -11,6 +11,7 @@
// and then we use `copy_buffer_dimensions` to copy the first half of the input buffer to the
// second half.
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -132,13 +133,13 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let data_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER
| BufferUsage::TRANSFER_SRC

View File

@ -11,6 +11,7 @@
// source code. The boilerplate is taken from the "basic-compute-shader.rs" example, where most of
// the boilerplate is explained.
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -140,13 +141,13 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let data_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -232,14 +232,14 @@ fn main() {
future.wait(None).unwrap();
}
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
// Prepare test array `[0, 1, 2, 3....]`.
let data_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -326,7 +326,7 @@ fn main() {
}
}
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
@ -353,7 +353,7 @@ fn main() {
// Create a CPU-accessible buffer initialized with the vertex data.
let temporary_accessible_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
// Specify this buffer will be used as a transfer source.
usage: BufferUsage::TRANSFER_SRC,
@ -372,7 +372,7 @@ fn main() {
// Create a buffer in device-local memory with enough space for `PARTICLE_COUNT` number of
// `Vertex`.
let device_local_buffer = Buffer::new_slice::<Vertex>(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
// Specify use as a storage buffer, vertex buffer, and transfer destination.
usage: BufferUsage::STORAGE_BUFFER

View File

@ -9,6 +9,7 @@
// TODO: Give a paragraph about what specialization are and what problems they solve.
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
@ -140,13 +141,13 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let descriptor_set_allocator = StandardDescriptorSetAllocator::new(device.clone());
let command_buffer_allocator =
StandardCommandBufferAllocator::new(device.clone(), Default::default());
let data_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -162,7 +162,7 @@ fn main() {
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -176,7 +176,7 @@ fn main() {
)
.unwrap();
let normals_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -190,7 +190,7 @@ fn main() {
)
.unwrap();
let index_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::INDEX_BUFFER,
..Default::default()
@ -247,7 +247,7 @@ fn main() {
.unwrap();
let (mut pipeline, mut framebuffers) = window_size_dependent_setup(
&memory_allocator,
memory_allocator.clone(),
vs.clone(),
fs.clone(),
&images,
@ -295,7 +295,7 @@ fn main() {
swapchain = new_swapchain;
let (new_pipeline, new_framebuffers) = window_size_dependent_setup(
&memory_allocator,
memory_allocator.clone(),
vs.clone(),
fs.clone(),
&new_images,
@ -436,12 +436,13 @@ fn main() {
/// This function is called once during initialization, then again whenever the window is resized.
fn window_size_dependent_setup(
memory_allocator: &StandardMemoryAllocator,
memory_allocator: Arc<StandardMemoryAllocator>,
vs: EntryPoint,
fs: EntryPoint,
images: &[Arc<Image>],
render_pass: Arc<RenderPass>,
) -> (Arc<GraphicsPipeline>, Vec<Arc<Framebuffer>>) {
let device = memory_allocator.device().clone();
let extent = images[0].extent();
let depth_buffer = ImageView::new_default(
@ -480,7 +481,6 @@ fn window_size_dependent_setup(
// driver to optimize things, at the cost of slower window resizes.
// https://computergraphics.stackexchange.com/questions/5742/vulkan-best-way-of-updating-pipeline-viewport
let pipeline = {
let device = memory_allocator.device();
let vertex_input_state = [Position::per_vertex(), Normal::per_vertex()]
.definition(&vs.info().input_interface)
.unwrap();
@ -498,7 +498,7 @@ fn window_size_dependent_setup(
let subpass = Subpass::from(render_pass, 0).unwrap();
GraphicsPipeline::new(
device.clone(),
device,
None,
GraphicsPipelineCreateInfo {
stages: stages.into_iter().collect(),

View File

@ -262,7 +262,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -301,7 +301,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -157,7 +157,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
#[derive(BufferContents, Vertex)]
#[repr(C)]
@ -181,7 +181,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
@ -235,7 +235,7 @@ fn main() {
.product::<DeviceSize>()
* array_layers as DeviceSize;
let upload_buffer = Buffer::new_slice(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -266,7 +266,7 @@ fn main() {
}
let image = Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format,

View File

@ -287,7 +287,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
// We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here
// to force rustc to use a defined layout for our data, as the default representation has *no
@ -311,7 +311,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -256,7 +256,7 @@ fn main() {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
// We now create a buffer that will store the shape of our triangle. We use `#[repr(C)]` here
// to force rustc to use a defined layout for our data, as the default representation has *no
@ -280,7 +280,7 @@ fn main() {
},
];
let vertex_buffer = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -225,7 +225,7 @@ impl VulkanoWindowRenderer {
let final_view_image = self.final_views[0].image();
let image = ImageView::new_default(
Image::new(
&self.memory_allocator,
self.memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format,

View File

@ -134,7 +134,7 @@ const MAX_ARENAS: usize = 32;
/// }
/// ```
#[derive(Debug)]
pub struct SubbufferAllocator<A = Arc<StandardMemoryAllocator>> {
pub struct SubbufferAllocator<A = StandardMemoryAllocator> {
state: UnsafeCell<SubbufferAllocatorState<A>>,
}
@ -143,7 +143,7 @@ where
A: MemoryAllocator,
{
/// Creates a new `SubbufferAllocator`.
pub fn new(memory_allocator: A, create_info: SubbufferAllocatorCreateInfo) -> Self {
pub fn new(memory_allocator: Arc<A>, create_info: SubbufferAllocatorCreateInfo) -> Self {
let SubbufferAllocatorCreateInfo {
arena_size,
buffer_usage,
@ -278,7 +278,7 @@ where
#[derive(Debug)]
struct SubbufferAllocatorState<A> {
memory_allocator: A,
memory_allocator: Arc<A>,
buffer_usage: BufferUsage,
memory_type_filter: MemoryTypeFilter,
// The alignment required for the subbuffers.
@ -358,7 +358,7 @@ where
fn create_arena(&self) -> Result<Arc<Buffer>, MemoryAllocatorError> {
Buffer::new(
&self.memory_allocator,
self.memory_allocator.clone(),
BufferCreateInfo {
usage: self.buffer_usage,
..Default::default()
@ -455,7 +455,7 @@ mod tests {
#[test]
fn reserve() {
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer_allocator = SubbufferAllocator::new(
memory_allocator,
@ -473,7 +473,7 @@ mod tests {
#[test]
fn capacity_increase() {
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer_allocator = SubbufferAllocator::new(
memory_allocator,

View File

@ -85,11 +85,11 @@ use crate::{
macros::{vulkan_bitflags, vulkan_enum},
memory::{
allocator::{
AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAlloc, MemoryAllocator,
AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator,
MemoryAllocatorError,
},
DedicatedAllocation, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
ExternalMemoryProperties, MemoryRequirements,
ExternalMemoryProperties, MemoryRequirements, ResourceMemory,
},
range_map::RangeMap,
sync::{future::AccessError, AccessConflict, CurrentAccess, Sharing},
@ -141,7 +141,7 @@ pub mod view;
///
/// # let device: std::sync::Arc<vulkano::device::Device> = return;
/// # let queue: std::sync::Arc<vulkano::device::Queue> = return;
/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
/// # let command_buffer_allocator: vulkano::command_buffer::allocator::StandardCommandBufferAllocator = return;
/// #
/// // Simple iterator to construct test data.
@ -149,7 +149,7 @@ pub mod view;
///
/// // Create a host-accessible buffer initialized with the data.
/// let temporary_accessible_buffer = Buffer::from_iter(
/// &memory_allocator,
/// memory_allocator.clone(),
/// BufferCreateInfo {
/// // Specify that this buffer will be used as a transfer source.
/// usage: BufferUsage::TRANSFER_SRC,
@ -167,7 +167,7 @@ pub mod view;
///
/// // Create a buffer in device-local memory with enough space for a slice of `10_000` floats.
/// let device_local_buffer = Buffer::new_slice::<f32>(
/// &memory_allocator,
/// memory_allocator.clone(),
/// BufferCreateInfo {
/// // Specify use as a storage buffer and transfer destination.
/// usage: BufferUsage::STORAGE_BUFFER | BufferUsage::TRANSFER_DST,
@ -219,7 +219,7 @@ pub enum BufferMemory {
/// The buffer is backed by normal memory, bound with [`bind_memory`].
///
/// [`bind_memory`]: RawBuffer::bind_memory
Normal(MemoryAlloc),
Normal(ResourceMemory),
/// The buffer is backed by sparse memory, bound with [`bind_sparse`].
///
@ -237,10 +237,10 @@ impl Buffer {
///
/// # Panics
///
/// - Panics if `buffer_info.size` is not zero.
/// - Panics if `create_info.size` is not zero.
/// - Panics if the chosen memory type is not host-visible.
pub fn from_data<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
create_info: BufferCreateInfo,
allocation_info: AllocationCreateInfo,
data: T,
@ -267,11 +267,11 @@ impl Buffer {
///
/// # Panics
///
/// - Panics if `buffer_info.size` is not zero.
/// - Panics if `create_info.size` is not zero.
/// - Panics if the chosen memory type is not host-visible.
/// - Panics if `iter` is empty.
pub fn from_iter<T, I>(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
create_info: BufferCreateInfo,
allocation_info: AllocationCreateInfo,
iter: I,
@ -307,7 +307,7 @@ impl Buffer {
///
/// - Panics if `buffer_info.size` is not zero.
pub fn new_sized<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
create_info: BufferCreateInfo,
allocation_info: AllocationCreateInfo,
) -> Result<Subbuffer<T>, Validated<BufferAllocateError>>
@ -333,7 +333,7 @@ impl Buffer {
/// - Panics if `buffer_info.size` is not zero.
/// - Panics if `len` is zero.
pub fn new_slice<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
create_info: BufferCreateInfo,
allocation_info: AllocationCreateInfo,
len: DeviceSize,
@ -352,7 +352,7 @@ impl Buffer {
/// - Panics if `buffer_info.size` is not zero.
/// - Panics if `len` is zero.
pub fn new_unsized<T>(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
create_info: BufferCreateInfo,
allocation_info: AllocationCreateInfo,
len: DeviceSize,
@ -379,7 +379,7 @@ impl Buffer {
/// - Panics if `buffer_info.size` is not zero.
/// - Panics if `layout.alignment()` is greater than 64.
pub fn new(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
mut create_info: BufferCreateInfo,
allocation_info: AllocationCreateInfo,
layout: DeviceLayout,
@ -412,6 +412,7 @@ impl Buffer {
Some(DedicatedAllocation::Buffer(&raw_buffer)),
)
.map_err(BufferAllocateError::AllocateMemory)?;
let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) };
let buffer = raw_buffer.bind_memory(allocation).map_err(|(err, _, _)| {
err.map(BufferAllocateError::BindMemory)

View File

@ -301,7 +301,7 @@ where
/// 64. [`SubbufferAllocator`] does this automatically.
///
/// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
/// [`invalidate_range`]: crate::memory::allocator::MemoryAlloc::invalidate_range
/// [`invalidate_range`]: crate::memory::ResourceMemory::invalidate_range
/// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
/// [`write`]: Self::write
/// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
@ -312,8 +312,8 @@ where
};
let range = if let Some(atom_size) = allocation.atom_size() {
// This works because the suballocators align allocations to the non-coherent atom size
// when the memory is host-visible but not host-coherent.
// This works because the memory allocator must align allocations to the non-coherent
// atom size when the memory is host-visible but not host-coherent.
let start = align_down(self.offset, atom_size);
let end = cmp::min(
align_up(self.offset + self.size, atom_size),
@ -387,7 +387,7 @@ where
/// does this automatically.
///
/// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
/// [`flush_range`]: crate::memory::allocator::MemoryAlloc::flush_range
/// [`flush_range`]: crate::memory::ResourceMemory::flush_range
/// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
/// [`read`]: Self::read
/// [`SubbufferAllocator`]: super::allocator::SubbufferAllocator
@ -398,8 +398,8 @@ where
};
let range = if let Some(atom_size) = allocation.atom_size() {
// This works because the suballocators align allocations to the non-coherent atom size
// when the memory is host-visible but not host-coherent.
// This works because the memory allocator must align allocations to the non-coherent
// atom size when the memory is host-visible but not host-coherent.
let start = align_down(self.offset, atom_size);
let end = cmp::min(
align_up(self.offset + self.size, atom_size),
@ -1142,7 +1142,7 @@ mod tests {
AllocationCreateInfo, AllocationType, DeviceLayout, MemoryAllocator,
StandardMemoryAllocator,
},
MemoryRequirements,
MemoryRequirements, ResourceMemory,
},
};
@ -1211,10 +1211,10 @@ mod tests {
#[test]
fn split_at() {
let (device, _) = gfx_dev_and_queue!();
let allocator = StandardMemoryAllocator::new_default(device);
let allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<u32>(
&allocator,
allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -1248,7 +1248,7 @@ mod tests {
#[test]
fn cast_aligned() {
let (device, _) = gfx_dev_and_queue!();
let allocator = StandardMemoryAllocator::new_default(device.clone());
let allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let raw_buffer = RawBuffer::new(
device,
@ -1288,6 +1288,7 @@ mod tests {
None,
)
.unwrap();
let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) };
let buffer = Buffer::from_raw(raw_buffer, BufferMemory::Normal(allocation));
let buffer = Subbuffer::from(Arc::new(buffer));

View File

@ -20,9 +20,9 @@ use crate::{
instance::InstanceOwnedDebugWrapper,
macros::impl_id_counter,
memory::{
allocator::{AllocationType, DeviceLayout, MemoryAlloc},
allocator::{AllocationType, DeviceLayout},
is_aligned, DedicatedTo, ExternalMemoryHandleTypes, MemoryAllocateFlags,
MemoryPropertyFlags, MemoryRequirements,
MemoryPropertyFlags, MemoryRequirements, ResourceMemory,
},
sync::Sharing,
DeviceSize, Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version,
@ -277,8 +277,8 @@ impl RawBuffer {
/// Binds device memory to this buffer.
pub fn bind_memory(
self,
allocation: MemoryAlloc,
) -> Result<Buffer, (Validated<VulkanError>, RawBuffer, MemoryAlloc)> {
allocation: ResourceMemory,
) -> Result<Buffer, (Validated<VulkanError>, RawBuffer, ResourceMemory)> {
if let Err(err) = self.validate_bind_memory(&allocation) {
return Err((err.into(), self, allocation));
}
@ -287,7 +287,10 @@ impl RawBuffer {
.map_err(|(err, buffer, allocation)| (err.into(), buffer, allocation))
}
fn validate_bind_memory(&self, allocation: &MemoryAlloc) -> Result<(), Box<ValidationError>> {
fn validate_bind_memory(
&self,
allocation: &ResourceMemory,
) -> Result<(), Box<ValidationError>> {
assert_ne!(allocation.allocation_type(), AllocationType::NonLinear);
let physical_device = self.device().physical_device();
@ -497,8 +500,8 @@ impl RawBuffer {
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn bind_memory_unchecked(
self,
allocation: MemoryAlloc,
) -> Result<Buffer, (VulkanError, RawBuffer, MemoryAlloc)> {
allocation: ResourceMemory,
) -> Result<Buffer, (VulkanError, RawBuffer, ResourceMemory)> {
let memory = allocation.device_memory();
let memory_offset = allocation.offset();

View File

@ -25,9 +25,9 @@
//! use vulkano::memory::allocator::AllocationCreateInfo;
//!
//! # let queue: Arc<vulkano::device::Queue> = return;
//! # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
//! # let memory_allocator: Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
//! let buffer = Buffer::new_slice::<u32>(
//! &memory_allocator,
//! memory_allocator.clone(),
//! BufferCreateInfo {
//! usage: BufferUsage::STORAGE_TEXEL_BUFFER,
//! ..Default::default()
@ -456,15 +456,16 @@ mod tests {
format::Format,
memory::allocator::{AllocationCreateInfo, StandardMemoryAllocator},
};
use std::sync::Arc;
#[test]
fn create_uniform() {
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<[u8; 4]>(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::UNIFORM_TEXEL_BUFFER,
..Default::default()
@ -488,10 +489,10 @@ mod tests {
fn create_storage() {
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<[u8; 4]>(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_TEXEL_BUFFER,
..Default::default()
@ -514,10 +515,10 @@ mod tests {
fn create_storage_atomic() {
// `VK_FORMAT_R32_UINT` guaranteed to be a supported format for atomics
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<u32>(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_TEXEL_BUFFER,
..Default::default()
@ -540,10 +541,10 @@ mod tests {
fn wrong_usage() {
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<[u8; 4]>(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_DST, // Dummy value
..Default::default()
@ -568,10 +569,10 @@ mod tests {
#[test]
fn unsupported_format() {
let (device, _) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buffer = Buffer::new_slice::<[f64; 4]>(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::UNIFORM_TEXEL_BUFFER | BufferUsage::STORAGE_TEXEL_BUFFER,
..Default::default()

View File

@ -339,6 +339,7 @@ mod tests {
shader::ShaderStages,
sync::GpuFuture,
};
use std::sync::Arc;
#[test]
fn basic_creation() {
@ -376,10 +377,10 @@ mod tests {
.unwrap();
let queue = queues.next().unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let source = Buffer::from_iter(
&memory_allocator,
memory_allocator.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
@ -394,7 +395,7 @@ mod tests {
.unwrap();
let destination = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_DST,
..Default::default()
@ -506,9 +507,9 @@ mod tests {
fn buffer_self_copy_overlapping() {
let (device, queue) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let source = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC | BufferUsage::TRANSFER_DST,
..Default::default()
@ -561,9 +562,9 @@ mod tests {
fn buffer_self_copy_not_overlapping() {
let (device, queue) = gfx_dev_and_queue!();
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let source = Buffer::from_iter(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC | BufferUsage::TRANSFER_DST,
..Default::default()
@ -613,10 +614,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
// Create a tiny test buffer
let buffer = Buffer::from_data(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::TRANSFER_DST,
..Default::default()
@ -712,9 +713,9 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let buf = Buffer::from_data(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()

View File

@ -49,7 +49,7 @@
//! types.
//! - Binding [imported] `DeviceMemory`.
//!
//! You can [create a `MemoryAlloc` from `DeviceMemory`] if you want to bind its own block of
//! You can [create a `ResourceMemory` from `DeviceMemory`] if you want to bind its own block of
//! memory to an image.
//!
//! [`ImageView`]: crate::image::view::ImageView
@ -59,7 +59,7 @@
//! [`DeviceMemory`]: crate::memory::DeviceMemory
//! [allocated yourself]: crate::memory::DeviceMemory::allocate
//! [imported]: crate::memory::DeviceMemory::import
//! [create a `MemoryAlloc` from `DeviceMemory`]: MemoryAlloc::new
//! [create a `ResourceMemory` from `DeviceMemory`]: ResourceMemory::new_dedicated
pub use self::{aspect::*, layout::*, sys::ImageCreateInfo, usage::*};
use self::{sys::RawImage, view::ImageViewType};
@ -68,9 +68,9 @@ use crate::{
format::{Format, FormatFeatures},
macros::{vulkan_bitflags, vulkan_bitflags_enum, vulkan_enum},
memory::{
allocator::{AllocationCreateInfo, MemoryAlloc, MemoryAllocator, MemoryAllocatorError},
allocator::{AllocationCreateInfo, MemoryAllocator, MemoryAllocatorError},
DedicatedAllocation, ExternalMemoryHandleType, ExternalMemoryHandleTypes,
ExternalMemoryProperties, MemoryRequirements,
ExternalMemoryProperties, MemoryRequirements, ResourceMemory,
},
range_map::RangeMap,
swapchain::Swapchain,
@ -128,7 +128,7 @@ pub enum ImageMemory {
/// The image is backed by normal memory, bound with [`bind_memory`].
///
/// [`bind_memory`]: RawImage::bind_memory
Normal(SmallVec<[MemoryAlloc; 4]>),
Normal(SmallVec<[ResourceMemory; 4]>),
/// The image is backed by sparse memory, bound with [`bind_sparse`].
///
@ -145,7 +145,7 @@ pub enum ImageMemory {
impl Image {
/// Creates a new uninitialized `Image`.
pub fn new(
allocator: &(impl MemoryAllocator + ?Sized),
allocator: Arc<dyn MemoryAllocator>,
create_info: ImageCreateInfo,
allocation_info: AllocationCreateInfo,
) -> Result<Arc<Self>, Validated<ImageAllocateError>> {
@ -168,6 +168,7 @@ impl Image {
Some(DedicatedAllocation::Image(&raw_image)),
)
.map_err(ImageAllocateError::AllocateMemory)?;
let allocation = unsafe { ResourceMemory::from_allocation(allocator, allocation) };
let image = raw_image.bind_memory([allocation]).map_err(|(err, _, _)| {
err.map(ImageAllocateError::BindMemory)

View File

@ -48,7 +48,7 @@
//! };
//!
//! # let device: std::sync::Arc<vulkano::device::Device> = return;
//! # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
//! # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
//! # let descriptor_set_allocator: vulkano::descriptor_set::allocator::StandardDescriptorSetAllocator = return;
//! #
//! let conversion = SamplerYcbcrConversion::new(device.clone(), SamplerYcbcrConversionCreateInfo {
@ -82,7 +82,7 @@
//! .unwrap();
//!
//! let image = Image::new(
//! &memory_allocator,
//! memory_allocator.clone(),
//! ImageCreateInfo {
//! image_type: ImageType::Dim2d,
//! format: Format::G8_B8_R8_3PLANE_420_UNORM,

View File

@ -33,9 +33,9 @@ use crate::{
instance::InstanceOwnedDebugWrapper,
macros::impl_id_counter,
memory::{
allocator::{AllocationType, DeviceLayout, MemoryAlloc},
allocator::{AllocationType, DeviceLayout},
is_aligned, DedicatedTo, ExternalMemoryHandleTypes, MemoryPropertyFlags,
MemoryRequirements,
MemoryRequirements, ResourceMemory,
},
sync::Sharing,
Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version, VulkanError,
@ -707,13 +707,13 @@ impl RawImage {
/// `allocations` must contain exactly `self.drm_format_modifier().unwrap().1` elements.
pub fn bind_memory(
self,
allocations: impl IntoIterator<Item = MemoryAlloc>,
allocations: impl IntoIterator<Item = ResourceMemory>,
) -> Result<
Image,
(
Validated<VulkanError>,
RawImage,
impl ExactSizeIterator<Item = MemoryAlloc>,
impl ExactSizeIterator<Item = ResourceMemory>,
),
> {
let allocations: SmallVec<[_; 4]> = allocations.into_iter().collect();
@ -736,7 +736,7 @@ impl RawImage {
fn validate_bind_memory(
&self,
allocations: &[MemoryAlloc],
allocations: &[ResourceMemory],
) -> Result<(), Box<ValidationError>> {
let physical_device = self.device().physical_device();
@ -1072,13 +1072,13 @@ impl RawImage {
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn bind_memory_unchecked(
self,
allocations: impl IntoIterator<Item = MemoryAlloc>,
allocations: impl IntoIterator<Item = ResourceMemory>,
) -> Result<
Image,
(
VulkanError,
RawImage,
impl ExactSizeIterator<Item = MemoryAlloc>,
impl ExactSizeIterator<Item = ResourceMemory>,
),
> {
let allocations: SmallVec<[_; 4]> = allocations.into_iter().collect();

View File

@ -10,10 +10,10 @@
//! In Vulkan, suballocation of [`DeviceMemory`] is left to the application, because every
//! application has slightly different needs and one can not incorporate an allocator into the
//! driver that would perform well in all cases. Vulkano stays true to this sentiment, but aims to
//! reduce the burden on the user as much as possible. You have a toolbox of configurable
//! [suballocators] to choose from that cover all allocation algorithms, which you can compose into
//! any kind of [hierarchy] you wish. This way you have maximum flexibility while still only using
//! a few `DeviceMemory` blocks and not writing any of the very error-prone code.
//! reduce the burden on the user as much as possible. You have a toolbox of [suballocators] to
//! choose from that cover all allocation algorithms, which you can compose into any kind of
//! [hierarchy] you wish. This way you have maximum flexibility while still only using a few
//! `DeviceMemory` blocks and not writing any of the very error-prone code.
//!
//! If you just want to allocate memory and don't have any special needs, look no further than the
//! [`StandardMemoryAllocator`].
@ -223,8 +223,8 @@ use self::array_vec::ArrayVec;
pub use self::{
layout::DeviceLayout,
suballocator::{
AllocationType, BuddyAllocator, BumpAllocator, FreeListAllocator, MemoryAlloc,
PoolAllocator, SuballocationCreateInfo, Suballocator, SuballocatorError,
AllocationType, BuddyAllocator, BumpAllocator, FreeListAllocator, Suballocation,
Suballocator, SuballocatorError,
},
};
use super::{
@ -242,8 +242,9 @@ use ash::vk::{MAX_MEMORY_HEAPS, MAX_MEMORY_TYPES};
use parking_lot::RwLock;
use std::{
error::Error,
fmt::{Display, Error as FmtError, Formatter},
fmt::{Debug, Display, Error as FmtError, Formatter},
ops::BitOr,
ptr,
sync::Arc,
};
@ -253,7 +254,11 @@ const M: DeviceSize = 1024 * K;
const G: DeviceSize = 1024 * M;
/// General-purpose memory allocators which allocate from any memory type dynamically as needed.
pub unsafe trait MemoryAllocator: DeviceOwned {
///
/// # Safety
///
/// TODO
pub unsafe trait MemoryAllocator: DeviceOwned + Send + Sync + 'static {
/// Finds the most suitable memory type index in `memory_type_bits` using the given `filter`.
/// Returns [`None`] if the requirements are too strict and no memory type is able to satisfy
/// them.
@ -269,12 +274,17 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
///
/// - `memory_type_index` - The index of the memory type to allocate from.
///
/// - `layout` - The layout of the allocation.
///
/// - `allocation_type` - The type of resources that can be bound to the allocation.
///
/// - `never_allocate` - If `true` then the allocator should never allocate `DeviceMemory`,
/// instead only suballocate from existing blocks.
fn allocate_from_type(
&self,
memory_type_index: u32,
create_info: SuballocationCreateInfo,
layout: DeviceLayout,
allocation_type: AllocationType,
never_allocate: bool,
) -> Result<MemoryAlloc, MemoryAllocatorError>;
@ -319,14 +329,27 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
dedicated_allocation: Option<DedicatedAllocation<'_>>,
) -> Result<MemoryAlloc, MemoryAllocatorError>;
/// Creates a root allocation/dedicated allocation.
/// Creates an allocation with a whole device memory block dedicated to it.
fn allocate_dedicated(
&self,
memory_type_index: u32,
allocation_size: DeviceSize,
dedicated_allocation: Option<DedicatedAllocation<'_>>,
export_handle_types: ExternalMemoryHandleTypes,
) -> Result<MemoryAlloc, Validated<VulkanError>>;
) -> Result<MemoryAlloc, MemoryAllocatorError>;
/// Deallocates the given `allocation`.
///
/// # Safety
///
/// - `allocation` must refer to a **currently allocated** allocation of `self`.
unsafe fn deallocate(&self, allocation: MemoryAlloc);
}
impl Debug for dyn MemoryAllocator {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), FmtError> {
f.debug_struct("MemoryAllocator").finish_non_exhaustive()
}
}
/// Describes what memory property flags are required, preferred and not preferred when picking a
@ -343,12 +366,12 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
/// # memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
/// # };
/// #
/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
/// # let format = return;
/// # let extent = return;
/// #
/// let texture = Image::new(
/// &memory_allocator,
/// memory_allocator.clone(),
/// ImageCreateInfo {
/// format,
/// extent,
@ -373,10 +396,10 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
/// # memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
/// # };
/// #
/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
/// #
/// let staging_buffer = Buffer::new_sized(
/// &memory_allocator,
/// memory_allocator.clone(),
/// BufferCreateInfo {
/// usage: BufferUsage::TRANSFER_SRC,
/// ..Default::default()
@ -401,10 +424,10 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
/// # memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
/// # };
/// #
/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
/// #
/// let uniform_buffer = Buffer::new_sized(
/// &memory_allocator,
/// memory_allocator.clone(),
/// BufferCreateInfo {
/// usage: BufferUsage::UNIFORM_BUFFER,
/// ..Default::default()
@ -428,10 +451,10 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
/// # memory::allocator::{AllocationCreateInfo, MemoryTypeFilter},
/// # };
/// #
/// # let memory_allocator: vulkano::memory::allocator::StandardMemoryAllocator = return;
/// # let memory_allocator: std::sync::Arc<vulkano::memory::allocator::StandardMemoryAllocator> = return;
/// #
/// let readback_buffer = Buffer::new_sized(
/// &memory_allocator,
/// memory_allocator.clone(),
/// BufferCreateInfo {
/// usage: BufferUsage::TRANSFER_DST,
/// ..Default::default()
@ -640,7 +663,6 @@ impl Default for AllocationCreateInfo {
/// Describes whether allocating [`DeviceMemory`] is desired.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum MemoryAllocatePreference {
/// There is no known preference, let the allocator decide.
Unknown,
@ -658,6 +680,38 @@ pub enum MemoryAllocatePreference {
AlwaysAllocate,
}
/// An allocation made using a [memory allocator].
///
/// [memory allocator]: MemoryAllocator
#[derive(Clone, Debug)]
pub struct MemoryAlloc {
/// The underlying block of device memory.
pub device_memory: Arc<DeviceMemory>,
/// The suballocation within the device memory block, or [`None`] if this is a dedicated
/// allocation.
pub suballocation: Option<Suballocation>,
/// The type of resources that can be bound to this memory block. This will be exactly equal to
/// the requested allocation type.
///
/// For dedicated allocations it doesn't matter what this is, as there aren't going to be any
/// neighboring suballocations. Therefore the allocator implementation is advised to always set
/// this to [`AllocationType::Unknown`] in that case for maximum flexibility.
pub allocation_type: AllocationType,
/// An opaque handle identifying the allocation inside the allocator.
pub allocation_handle: AllocationHandle,
}
/// An opaque handle identifying an allocation inside an allocator.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[repr(transparent)]
pub struct AllocationHandle(pub *mut ());
unsafe impl Send for AllocationHandle {}
unsafe impl Sync for AllocationHandle {}
/// Error that can be returned when creating an [allocation] using a [memory allocator].
///
/// [allocation]: MemoryAlloc
@ -690,12 +744,6 @@ pub enum MemoryAllocatorError {
/// This is returned when using [`MemoryAllocatePreference::NeverAllocate`] and the allocation
/// size exceeded the block size for all heaps of suitable memory types.
BlockSizeExceeded,
/// The block size for the suballocator was exceeded.
///
/// This is returned when using [`GenericMemoryAllocator<Arc<PoolAllocator<BLOCK_SIZE>>>`] if
/// the allocation size exceeded `BLOCK_SIZE`.
SuballocatorBlockSizeExceeded,
}
impl Error for MemoryAllocatorError {
@ -720,9 +768,6 @@ impl Display for MemoryAllocatorError {
"the allocation size was greater than the block size for all heaps of suitable \
memory types and dedicated allocations were explicitly forbidden"
}
Self::SuballocatorBlockSizeExceeded => {
"the allocation size was greater than the suballocator's block size"
}
};
f.write_str(msg)
@ -735,8 +780,8 @@ impl Display for MemoryAllocatorError {
/// not suited to the task.
///
/// See also [`GenericMemoryAllocator`] for details about the allocation algorithm, and
/// [`FreeListAllocator`] for details about the suballocation algorithm and example usage.
pub type StandardMemoryAllocator = GenericMemoryAllocator<Arc<FreeListAllocator>>;
/// [`FreeListAllocator`] for details about the suballocation algorithm.
pub type StandardMemoryAllocator = GenericMemoryAllocator<FreeListAllocator>;
impl StandardMemoryAllocator {
/// Creates a new `StandardMemoryAllocator` with default configuration.
@ -815,30 +860,31 @@ impl StandardMemoryAllocator {
/// [suballocate]: Suballocator
/// [the `MemoryAllocator` implementation]: Self#impl-MemoryAllocator-for-GenericMemoryAllocator<S>
#[derive(Debug)]
pub struct GenericMemoryAllocator<S: Suballocator> {
pub struct GenericMemoryAllocator<S> {
device: InstanceOwnedDebugWrapper<Arc<Device>>,
buffer_image_granularity: DeviceAlignment,
// Each memory type has a pool of `DeviceMemory` blocks.
pools: ArrayVec<Pool<S>, MAX_MEMORY_TYPES>,
// Each memory heap has its own block size.
block_sizes: ArrayVec<DeviceSize, MAX_MEMORY_HEAPS>,
allocation_type: AllocationType,
// Global mask of memory types.
memory_type_bits: u32,
dedicated_allocation: bool,
export_handle_types: ArrayVec<ExternalMemoryHandleTypes, MAX_MEMORY_TYPES>,
flags: MemoryAllocateFlags,
// Global mask of memory types.
memory_type_bits: u32,
// How many `DeviceMemory` allocations should be allowed before restricting them.
max_allocations: u32,
}
#[derive(Debug)]
struct Pool<S> {
blocks: RwLock<Vec<S>>,
blocks: RwLock<Vec<Box<Block<S>>>>,
// This is cached here for faster access, so we don't need to hop through 3 pointers.
memory_type: ash::vk::MemoryType,
atom_size: DeviceAlignment,
}
impl<S: Suballocator> GenericMemoryAllocator<S> {
impl<S> GenericMemoryAllocator<S> {
// This is a false-positive, we only use this const for static initialization.
#[allow(clippy::declare_interior_mutable_const)]
const EMPTY_POOL: Pool<S> = Pool {
@ -847,6 +893,7 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
property_flags: ash::vk::MemoryPropertyFlags::empty(),
heap_index: 0,
},
atom_size: DeviceAlignment::MIN,
};
/// Creates a new `GenericMemoryAllocator<S>` using the provided suballocator `S` for
@ -857,7 +904,6 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
/// - Panics if `create_info.block_sizes` is not sorted by threshold.
/// - Panics if `create_info.block_sizes` contains duplicate thresholds.
/// - Panics if `create_info.block_sizes` does not contain a baseline threshold of `0`.
/// - Panics if the block size for a heap exceeds the size of the heap.
pub fn new(
device: Arc<Device>,
create_info: GenericMemoryAllocatorCreateInfo<'_, '_>,
@ -886,13 +932,17 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
let GenericMemoryAllocatorCreateInfo {
block_sizes,
memory_type_bits,
allocation_type,
dedicated_allocation,
export_handle_types,
mut device_address,
_ne: _,
} = create_info;
let buffer_image_granularity = device
.physical_device()
.properties()
.buffer_image_granularity;
let MemoryProperties {
memory_types,
memory_heaps,
@ -900,11 +950,24 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
let mut pools = ArrayVec::new(memory_types.len(), [Self::EMPTY_POOL; MAX_MEMORY_TYPES]);
for (i, memory_type) in memory_types.iter().enumerate() {
for (
i,
&MemoryType {
property_flags,
heap_index,
},
) in memory_types.iter().enumerate()
{
pools[i].memory_type = ash::vk::MemoryType {
property_flags: memory_type.property_flags.into(),
heap_index: memory_type.heap_index,
property_flags: property_flags.into(),
heap_index,
};
if property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE)
&& !property_flags.intersects(MemoryPropertyFlags::HOST_COHERENT)
{
pools[i].atom_size = device.physical_device().properties().non_coherent_atom_size;
}
}
let block_sizes = {
@ -916,9 +979,6 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
Err(idx) => idx.saturating_sub(1),
};
sizes[i] = block_sizes[idx].1;
// VUID-vkAllocateMemory-pAllocateInfo-01713
assert!(sizes[i] <= memory_heap.size);
}
sizes
@ -955,9 +1015,9 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
GenericMemoryAllocator {
device: InstanceOwnedDebugWrapper(device),
buffer_image_granularity,
pools,
block_sizes,
allocation_type,
dedicated_allocation,
export_handle_types,
flags,
@ -965,9 +1025,50 @@ impl<S: Suballocator> GenericMemoryAllocator<S> {
max_allocations,
}
}
#[cold]
fn allocate_device_memory(
&self,
memory_type_index: u32,
allocation_size: DeviceSize,
dedicated_allocation: Option<DedicatedAllocation<'_>>,
export_handle_types: ExternalMemoryHandleTypes,
) -> Result<Arc<DeviceMemory>, Validated<VulkanError>> {
let mut memory = DeviceMemory::allocate(
self.device.clone(),
MemoryAllocateInfo {
allocation_size,
memory_type_index,
dedicated_allocation,
export_handle_types,
flags: self.flags,
..Default::default()
},
)?;
if self.pools[memory_type_index as usize]
.memory_type
.property_flags
.intersects(ash::vk::MemoryPropertyFlags::HOST_VISIBLE)
{
// SAFETY:
// - We checked that the memory is host-visible.
// - The memory can't be mapped already, because we just allocated it.
// - Mapping the whole range is always valid.
unsafe {
memory.map_unchecked(MemoryMapInfo {
offset: 0,
size: memory.allocation_size(),
_ne: crate::NonExhaustive(()),
})?;
}
}
Ok(Arc::new(memory))
}
}
unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
unsafe impl<S: Suballocator + Send + Sync + 'static> MemoryAllocator for GenericMemoryAllocator<S> {
fn find_memory_type_index(
&self,
memory_type_bits: u32,
@ -1001,6 +1102,10 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
///
/// - `memory_type_index` - The index of the memory type to allocate from.
///
/// - `layout` - The layout of the allocation.
///
/// - `allocation_type` - The type of resources that can be bound to the allocation.
///
/// - `never_allocate` - If `true` then the allocator should never allocate `DeviceMemory`,
/// instead only suballocate from existing blocks.
///
@ -1026,15 +1131,10 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
fn allocate_from_type(
&self,
memory_type_index: u32,
create_info: SuballocationCreateInfo,
mut layout: DeviceLayout,
allocation_type: AllocationType,
never_allocate: bool,
) -> Result<MemoryAlloc, MemoryAllocatorError> {
let SuballocationCreateInfo {
layout,
allocation_type: _,
_ne: _,
} = create_info;
let size = layout.size();
let pool = &self.pools[memory_type_index as usize];
let block_size = self.block_sizes[pool.memory_type.heap_index as usize];
@ -1043,6 +1143,8 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
return Err(MemoryAllocatorError::BlockSizeExceeded);
}
layout = layout.align_to(pool.atom_size).unwrap();
let mut blocks = if S::IS_BLOCKING {
// If the allocation algorithm needs to block, then there's no point in trying to avoid
// locks here either. In that case the best strategy is to take full advantage of it by
@ -1052,15 +1154,15 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
// huge amount of memory unless you configure your block sizes properly!
let mut blocks = pool.blocks.write();
blocks.sort_by_key(Suballocator::free_size);
let (Ok(idx) | Err(idx)) = blocks.binary_search_by_key(&size, Suballocator::free_size);
blocks.sort_by_key(|block| block.free_size());
let (Ok(idx) | Err(idx)) =
blocks.binary_search_by_key(&size, |block| block.free_size());
for block in &blocks[idx..] {
match block.allocate(create_info.clone()) {
Ok(allocation) => return Ok(allocation),
Err(SuballocatorError::BlockSizeExceeded) => {
return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded);
}
Err(_) => {}
if let Ok(allocation) =
block.allocate(layout, allocation_type, self.buffer_image_granularity)
{
return Ok(allocation);
}
}
@ -1076,33 +1178,26 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
// has the same performance as trying to allocate.
let blocks = pool.blocks.read();
// Search in reverse order because we always append new blocks at the end.
for block in blocks.iter().rev() {
match block.allocate(create_info.clone()) {
Ok(allocation) => return Ok(allocation),
// This can happen when using the `PoolAllocator<BLOCK_SIZE>` if the allocation
// size is greater than `BLOCK_SIZE`.
Err(SuballocatorError::BlockSizeExceeded) => {
return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded);
}
Err(_) => {}
if let Ok(allocation) =
block.allocate(layout, allocation_type, self.buffer_image_granularity)
{
return Ok(allocation);
}
}
let len = blocks.len();
drop(blocks);
let blocks = pool.blocks.write();
if blocks.len() > len {
// Another thread beat us to it and inserted a fresh block, try to suballocate it.
match blocks[len].allocate(create_info.clone()) {
Ok(allocation) => return Ok(allocation),
// This can happen if this is the first block that was inserted and when using
// the `PoolAllocator<BLOCK_SIZE>` if the allocation size is greater than
// `BLOCK_SIZE`.
Err(SuballocatorError::BlockSizeExceeded) => {
return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded);
}
Err(_) => {}
if let Ok(allocation) =
blocks[len].allocate(layout, allocation_type, self.buffer_image_granularity)
{
return Ok(allocation);
}
}
@ -1111,11 +1206,13 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
// For bump allocators, first do a garbage sweep and try to allocate again.
if S::NEEDS_CLEANUP {
blocks.iter_mut().for_each(Suballocator::cleanup);
blocks.sort_unstable_by_key(Suballocator::free_size);
blocks.iter_mut().for_each(|block| block.cleanup());
blocks.sort_unstable_by_key(|block| block.free_size());
if let Some(block) = blocks.last() {
if let Ok(allocation) = block.allocate(create_info.clone()) {
if let Ok(allocation) =
block.allocate(layout, allocation_type, self.buffer_image_granularity)
{
return Ok(allocation);
}
}
@ -1137,14 +1234,14 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
loop {
let allocation_size = block_size >> i;
match self.allocate_dedicated(
match self.allocate_device_memory(
memory_type_index,
allocation_size,
None,
export_handle_types,
) {
Ok(allocation) => {
break S::new(allocation);
Ok(device_memory) => {
break Block::new(device_memory);
}
// Retry up to 3 times, halving the allocation size each time so long as the
// resulting size is still large enough.
@ -1161,18 +1258,13 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
blocks.push(block);
let block = blocks.last().unwrap();
match block.allocate(create_info) {
match block.allocate(layout, allocation_type, self.buffer_image_granularity) {
Ok(allocation) => Ok(allocation),
// This can't happen as we always allocate a block of sufficient size.
Err(SuballocatorError::OutOfRegionMemory) => unreachable!(),
// This can't happen as the block is fresher than Febreze and we're still holding an
// exclusive lock.
Err(SuballocatorError::FragmentedRegion) => unreachable!(),
// This can happen if this is the first block that was inserted and when using the
// `PoolAllocator<BLOCK_SIZE>` if the allocation size is greater than `BLOCK_SIZE`.
Err(SuballocatorError::BlockSizeExceeded) => {
Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded)
}
}
}
@ -1258,12 +1350,6 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
_ne: _,
} = create_info;
let create_info = SuballocationCreateInfo {
layout,
allocation_type,
_ne: crate::NonExhaustive(()),
};
let size = layout.size();
let mut memory_type_index = self
@ -1295,7 +1381,6 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
dedicated_allocation,
export_handle_types,
)
.map_err(MemoryAllocatorError::AllocateDeviceMemory)
} else {
if size > block_size / 2 {
prefers_dedicated_allocation = true;
@ -1313,30 +1398,34 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
dedicated_allocation,
export_handle_types,
)
.map_err(MemoryAllocatorError::AllocateDeviceMemory)
// Fall back to suballocation.
.or_else(|err| {
self.allocate_from_type(
memory_type_index,
create_info.clone(),
layout,
allocation_type,
true, // A dedicated allocation already failed.
)
.map_err(|_| err)
})
} else {
self.allocate_from_type(memory_type_index, create_info.clone(), false)
// Fall back to dedicated allocation. It is possible that the 1/8
// block size tried was greater than the allocation size, so
// there's hope.
.or_else(|_| {
self.allocate_dedicated(
memory_type_index,
size,
dedicated_allocation,
export_handle_types,
)
.map_err(MemoryAllocatorError::AllocateDeviceMemory)
})
self.allocate_from_type(
memory_type_index,
layout,
allocation_type,
false,
)
// Fall back to dedicated allocation. It is possible that the 1/8
// block size tried was greater than the allocation size, so
// there's hope.
.or_else(|_| {
self.allocate_dedicated(
memory_type_index,
size,
dedicated_allocation,
export_handle_types,
)
})
}
}
}
@ -1345,24 +1434,18 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
return Err(MemoryAllocatorError::DedicatedAllocationRequired);
}
self.allocate_from_type(memory_type_index, create_info.clone(), true)
self.allocate_from_type(memory_type_index, layout, allocation_type, true)
}
MemoryAllocatePreference::AlwaysAllocate => self
.allocate_dedicated(
memory_type_index,
size,
dedicated_allocation,
export_handle_types,
)
.map_err(MemoryAllocatorError::AllocateDeviceMemory),
MemoryAllocatePreference::AlwaysAllocate => self.allocate_dedicated(
memory_type_index,
size,
dedicated_allocation,
export_handle_types,
),
};
match res {
Ok(allocation) => return Ok(allocation),
// This is not recoverable.
Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded) => {
return Err(MemoryAllocatorError::SuballocatorBlockSizeExceeded);
}
// Try a different memory type.
Err(err) => {
memory_type_bits &= !(1 << memory_type_index);
@ -1381,43 +1464,54 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
allocation_size: DeviceSize,
dedicated_allocation: Option<DedicatedAllocation<'_>>,
export_handle_types: ExternalMemoryHandleTypes,
) -> Result<MemoryAlloc, Validated<VulkanError>> {
let mut memory = DeviceMemory::allocate(
self.device.clone(),
MemoryAllocateInfo {
allocation_size,
) -> Result<MemoryAlloc, MemoryAllocatorError> {
let device_memory = self
.allocate_device_memory(
memory_type_index,
allocation_size,
dedicated_allocation,
export_handle_types,
flags: self.flags,
..Default::default()
},
)?;
)
.map_err(MemoryAllocatorError::AllocateDeviceMemory)?;
if self.pools[memory_type_index as usize]
.memory_type
.property_flags
.intersects(ash::vk::MemoryPropertyFlags::HOST_VISIBLE)
{
// SAFETY:
// - We checked that the memory is host-visible.
// - The memory can't be mapped already, because we just allocated it.
// - Mapping the whole range is always valid.
unsafe {
memory.map_unchecked(MemoryMapInfo {
offset: 0,
size: memory.allocation_size(),
_ne: crate::NonExhaustive(()),
})?;
Ok(MemoryAlloc {
device_memory,
suballocation: None,
allocation_type: AllocationType::Unknown,
allocation_handle: AllocationHandle(ptr::null_mut()),
})
}
unsafe fn deallocate(&self, allocation: MemoryAlloc) {
if let Some(suballocation) = allocation.suballocation {
let block_ptr = allocation.allocation_handle.0 as *const Block<S>;
// TODO: Maybe do a similar check for dedicated blocks.
#[cfg(debug_assertions)]
{
let memory_type_index = allocation.device_memory.memory_type_index();
let pool = self.pools[memory_type_index as usize].blocks.read();
assert!(
pool.iter()
.any(|block| &**block as *const Block<S> == block_ptr),
"attempted to deallocate a memory block that does not belong to this allocator",
);
}
// SAFETY: The caller must guarantee that `allocation` refers to one allocated by
// `self`, therefore `block_ptr` must be the same one we gave out on allocation. We
// know that this pointer must be valid, because all blocks are boxed and pinned in
// memory and because a block isn't dropped until the allocator itself is dropped, at
// which point it would be impossible to call this method. We also know that it must be
// valid to create a reference to the block, because we only ever access it via shared
// references.
let block = &*block_ptr;
// SAFETY: The caller must guarantee that `allocation` refers to a currently allocated
// allocation of `self`.
block.deallocate(suballocation);
}
let mut allocation = MemoryAlloc::new(memory);
// SAFETY: The memory is freshly allocated.
unsafe { allocation.set_allocation_type(self.allocation_type) };
Ok(allocation)
}
}
@ -1433,10 +1527,11 @@ unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
fn allocate_from_type(
&self,
memory_type_index: u32,
create_info: SuballocationCreateInfo,
layout: DeviceLayout,
allocation_type: AllocationType,
never_allocate: bool,
) -> Result<MemoryAlloc, MemoryAllocatorError> {
(**self).allocate_from_type(memory_type_index, create_info, never_allocate)
(**self).allocate_from_type(memory_type_index, layout, allocation_type, never_allocate)
}
fn allocate(
@ -1460,7 +1555,7 @@ unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
allocation_size: DeviceSize,
dedicated_allocation: Option<DedicatedAllocation<'_>>,
export_handle_types: ExternalMemoryHandleTypes,
) -> Result<MemoryAlloc, Validated<VulkanError>> {
) -> Result<MemoryAlloc, MemoryAllocatorError> {
(**self).allocate_dedicated(
memory_type_index,
allocation_size,
@ -1468,14 +1563,65 @@ unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
export_handle_types,
)
}
unsafe fn deallocate(&self, allocation: MemoryAlloc) {
(**self).deallocate(allocation)
}
}
unsafe impl<S: Suballocator> DeviceOwned for GenericMemoryAllocator<S> {
unsafe impl<S> DeviceOwned for GenericMemoryAllocator<S> {
fn device(&self) -> &Arc<Device> {
&self.device
}
}
#[derive(Debug)]
struct Block<S> {
device_memory: Arc<DeviceMemory>,
suballocator: S,
}
impl<S: Suballocator> Block<S> {
fn new(device_memory: Arc<DeviceMemory>) -> Box<Self> {
let suballocator = S::new(0, device_memory.allocation_size());
Box::new(Block {
device_memory,
suballocator,
})
}
fn allocate(
&self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
) -> Result<MemoryAlloc, SuballocatorError> {
let suballocation =
self.suballocator
.allocate(layout, allocation_type, buffer_image_granularity)?;
Ok(MemoryAlloc {
device_memory: self.device_memory.clone(),
suballocation: Some(suballocation),
allocation_type,
allocation_handle: AllocationHandle(self as *const Block<S> as _),
})
}
unsafe fn deallocate(&self, suballocation: Suballocation) {
self.suballocator.deallocate(suballocation)
}
fn free_size(&self) -> DeviceSize {
self.suballocator.free_size()
}
fn cleanup(&mut self) {
self.suballocator.cleanup();
}
}
/// Parameters to create a new [`GenericMemoryAllocator`].
#[derive(Clone, Debug)]
pub struct GenericMemoryAllocatorCreateInfo<'b, 'e> {
@ -1513,40 +1659,25 @@ pub struct GenericMemoryAllocatorCreateInfo<'b, 'e> {
/// [`PROTECTED`]: MemoryPropertyFlags::DEVICE_COHERENT
pub memory_type_bits: u32,
/// The allocation type that should be used for root allocations.
///
/// You only need to worry about this if you're using [`PoolAllocator`] as the suballocator, as
/// all suballocations that the pool allocator makes inherit their allocation type from the
/// parent allocation. For the [`FreeListAllocator`] and the [`BuddyAllocator`] this must be
/// [`AllocationType::Unknown`] otherwise you will get panics. It does not matter what this is
/// when using the [`BumpAllocator`].
///
/// The default value is [`AllocationType::Unknown`].
pub allocation_type: AllocationType,
/// Whether the allocator should use the dedicated allocation APIs.
///
/// This means that when the allocator decides that an allocation should not be suballocated,
/// but rather have its own block of [`DeviceMemory`], that that allocation will be made a
/// dedicated allocation. Otherwise they are still made free-standing ([root]) allocations,
/// just not [dedicated] ones.
/// dedicated allocation. Otherwise they are still given their own block of device memory, just
/// that that block won't be [dedicated] to a particular resource.
///
/// Dedicated allocations are an optimization which may result in better performance, so there
/// really is no reason to disable this option, unless the restrictions that they bring with
/// them are a problem. Namely, a dedicated allocation must only be used for the resource it
/// was created for. Meaning that [reusing the memory] for something else is not possible,
/// [suballocating it] is not possible, and [aliasing it] is also not possible.
/// was created for. Meaning that reusing the memory for something else is not possible,
/// suballocating it is not possible, and aliasing it is also not possible.
///
/// This option is silently ignored (treated as `false`) if the device API version is below 1.1
/// and the [`khr_dedicated_allocation`] extension is not enabled on the device.
///
/// The default value is `true`.
///
/// [root]: MemoryAlloc::is_root
/// [dedicated]: MemoryAlloc::is_dedicated
/// [reusing the memory]: MemoryAlloc::try_unwrap
/// [suballocating it]: Suballocator
/// [aliasing it]: MemoryAlloc::alias
/// [dedicated]: DeviceMemory::is_dedicated
/// [`khr_dedicated_allocation`]: crate::device::DeviceExtensions::khr_dedicated_allocation
pub dedicated_allocation: bool,
@ -1592,7 +1723,6 @@ impl GenericMemoryAllocatorCreateInfo<'_, '_> {
let &Self {
block_sizes,
memory_type_bits: _,
allocation_type: _,
dedicated_allocation: _,
export_handle_types,
device_address: _,
@ -1651,7 +1781,6 @@ impl Default for GenericMemoryAllocatorCreateInfo<'_, '_> {
GenericMemoryAllocatorCreateInfo {
block_sizes: &[],
memory_type_bits: u32::MAX,
allocation_type: AllocationType::Unknown,
dedicated_allocation: true,
export_handle_types: &[],
device_address: true,

File diff suppressed because it is too large Load Diff

View File

@ -12,7 +12,7 @@ use crate::{
device::{Device, DeviceOwned},
instance::InstanceOwnedDebugWrapper,
macros::{impl_id_counter, vulkan_bitflags, vulkan_bitflags_enum},
memory::{is_aligned, MemoryPropertyFlags},
memory::{allocator::DeviceLayout, is_aligned, MemoryPropertyFlags},
DeviceSize, Requires, RequiresAllOf, RequiresOneOf, Validated, ValidationError, Version,
VulkanError, VulkanObject,
};
@ -284,6 +284,9 @@ impl DeviceMemory {
output.assume_init()
};
// Sanity check: this would lead to UB when suballocating.
assert!(allocation_size <= DeviceLayout::MAX_SIZE);
let atom_size = device.physical_device().properties().non_coherent_atom_size;
let is_coherent = device.physical_device().memory_properties().memory_types
@ -330,6 +333,9 @@ impl DeviceMemory {
_ne: _,
} = allocate_info;
// Sanity check: this would lead to UB when suballocating.
assert!(allocation_size <= DeviceLayout::MAX_SIZE);
let atom_size = device.physical_device().properties().non_coherent_atom_size;
let is_coherent = device.physical_device().memory_properties().memory_types
@ -407,6 +413,10 @@ impl DeviceMemory {
self.atom_size
}
pub(crate) fn is_coherent(&self) -> bool {
self.is_coherent
}
/// Maps a range of memory to be accessed by the host.
///
/// `self` must not be host-mapped already and must be allocated from host-visible memory.

View File

@ -91,18 +91,25 @@
//! get memory from that pool. By default if you don't specify any pool when creating a buffer or
//! an image, an instance of `StandardMemoryPool` that is shared by the `Device` object is used.
use self::allocator::DeviceLayout;
use self::allocator::{
align_up, AllocationHandle, AllocationType, DeviceLayout, MemoryAlloc, MemoryAllocator,
Suballocation,
};
pub use self::{alignment::*, device_memory::*};
use crate::{
buffer::{sys::RawBuffer, Subbuffer},
device::{Device, DeviceOwned, DeviceOwnedDebugWrapper},
image::{sys::RawImage, Image, ImageAspects},
macros::vulkan_bitflags,
sync::semaphore::Semaphore,
DeviceSize,
sync::{semaphore::Semaphore, HostAccessError},
DeviceSize, Validated, ValidationError, VulkanError,
};
use std::{
cmp,
mem::ManuallyDrop,
num::NonZeroU64,
ops::{Bound, Range, RangeBounds, RangeTo},
ptr::{self, NonNull},
sync::Arc,
};
@ -110,6 +117,332 @@ mod alignment;
pub mod allocator;
mod device_memory;
/// Memory that can be bound to resources.
///
/// Most commonly you will want to obtain this by first using a [memory allocator] and then
/// [constructing this object from its allocation]. Alternatively, if you want to bind a whole
/// block of `DeviceMemory` to a resource, or can't go through an allocator, you can use [the
/// dedicated constructor].
///
/// [memory allocator]: allocator::MemoryAllocator
/// [the dedicated constructor]: Self::new_dedicated
#[derive(Debug)]
pub struct ResourceMemory {
device_memory: ManuallyDrop<DeviceOwnedDebugWrapper<Arc<DeviceMemory>>>,
offset: DeviceSize,
size: DeviceSize,
allocation_type: AllocationType,
allocation_handle: AllocationHandle,
suballocation_handle: Option<AllocationHandle>,
allocator: Option<Arc<dyn MemoryAllocator>>,
}
impl ResourceMemory {
/// Creates a new `ResourceMemory` that has a whole device memory block dedicated to it. You
/// may use this when you obtain the memory in a way other than through the use of a memory
/// allocator, for instance by importing memory.
///
/// This is safe because we take ownership of the device memory, so that there can be no
/// aliasing resources. On the other hand, the device memory can never be reused: it will be
/// freed once the returned object is dropped.
pub fn new_dedicated(device_memory: DeviceMemory) -> Self {
unsafe { Self::new_dedicated_unchecked(Arc::new(device_memory)) }
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
pub unsafe fn new_dedicated_unchecked(device_memory: Arc<DeviceMemory>) -> Self {
ResourceMemory {
offset: 0,
size: device_memory.allocation_size(),
allocation_type: AllocationType::Unknown,
allocation_handle: AllocationHandle(ptr::null_mut()),
suballocation_handle: None,
allocator: None,
device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(device_memory)),
}
}
/// Creates a new `ResourceMemory` from an allocation of a memory allocator.
///
/// Ownership of `allocation` is semantically transferred to this object, and it is deallocated
/// when the returned object is dropped.
///
/// # Safety
///
/// - `allocation` must refer to a **currently allocated** allocation of `allocator`.
/// - `allocation` must never be deallocated.
#[inline]
pub unsafe fn from_allocation(
allocator: Arc<dyn MemoryAllocator>,
allocation: MemoryAlloc,
) -> Self {
if let Some(suballocation) = allocation.suballocation {
ResourceMemory {
offset: suballocation.offset,
size: suballocation.size,
allocation_type: allocation.allocation_type,
allocation_handle: allocation.allocation_handle,
suballocation_handle: Some(suballocation.handle),
allocator: Some(allocator),
device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(allocation.device_memory)),
}
} else {
ResourceMemory {
offset: 0,
size: allocation.device_memory.allocation_size(),
allocation_type: allocation.allocation_type,
allocation_handle: allocation.allocation_handle,
suballocation_handle: None,
allocator: Some(allocator),
device_memory: ManuallyDrop::new(DeviceOwnedDebugWrapper(allocation.device_memory)),
}
}
}
/// Returns the underlying block of [`DeviceMemory`].
#[inline]
pub fn device_memory(&self) -> &Arc<DeviceMemory> {
&self.device_memory
}
/// Returns the offset (in bytes) within the [`DeviceMemory`] block where this `ResourceMemory`
/// beings.
///
/// If this `ResourceMemory` is not a [suballocation], then this will be `0`.
///
/// [suballocation]: Suballocation
#[inline]
pub fn offset(&self) -> DeviceSize {
self.offset
}
/// Returns the size (in bytes) of the `ResourceMemory`.
///
/// If this `ResourceMemory` is not a [suballocation], then this will be equal to the
/// [allocation size] of the [`DeviceMemory`] block.
///
/// [suballocation]: Suballocation
#[inline]
pub fn size(&self) -> DeviceSize {
self.size
}
/// Returns the type of resources that can be bound to this `ResourceMemory`.
///
/// If this `ResourceMemory` is not a [suballocation], then this will be
/// [`AllocationType::Unknown`].
///
/// [suballocation]: Suballocation
#[inline]
pub fn allocation_type(&self) -> AllocationType {
self.allocation_type
}
fn suballocation(&self) -> Option<Suballocation> {
self.suballocation_handle.map(|handle| Suballocation {
offset: self.offset,
size: self.size,
handle,
})
}
/// Returns the mapped pointer to a range of the `ResourceMemory`, or returns [`None`] if ouf
/// of bounds.
///
/// `range` is specified in bytes relative to the beginning of `self` and must fall within the
/// range of the memory mapping given to [`DeviceMemory::map`].
///
/// See [`MappingState::slice`] for the safety invariants of the returned pointer.
///
/// [`MappingState::slice`]: crate::memory::MappingState::slice
#[inline]
pub fn mapped_slice(
&self,
range: impl RangeBounds<DeviceSize>,
) -> Option<Result<NonNull<[u8]>, HostAccessError>> {
let mut range = self::range(range, ..self.size())?;
range.start += self.offset();
range.end += self.offset();
let res = if let Some(state) = self.device_memory().mapping_state() {
state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
} else {
Err(HostAccessError::NotHostMapped)
};
Some(res)
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn mapped_slice_unchecked(
&self,
range: impl RangeBounds<DeviceSize>,
) -> Result<NonNull<[u8]>, HostAccessError> {
let mut range = self::range_unchecked(range, ..self.size());
range.start += self.offset();
range.end += self.offset();
if let Some(state) = self.device_memory().mapping_state() {
state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
} else {
Err(HostAccessError::NotHostMapped)
}
}
pub(crate) fn atom_size(&self) -> Option<DeviceAlignment> {
let memory = self.device_memory();
(!memory.is_coherent()).then_some(memory.atom_size())
}
/// Invalidates the host cache for a range of the `ResourceMemory`.
///
/// If the device memory is not [host-coherent], you must call this function before the memory
/// is read by the host, if the device previously wrote to the memory. It has no effect if the
/// memory is host-coherent.
///
/// # Safety
///
/// - If there are memory writes by the device that have not been propagated into the host
///   cache, then there must not be any references in Rust code to any portion of the specified
///   `memory_range`.
///
/// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
/// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
#[inline]
pub unsafe fn invalidate_range(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), Validated<VulkanError>> {
    self.validate_memory_range(&memory_range)?;

    // Translate the allocation-relative range into one relative to the `DeviceMemory`.
    let device_range = self.create_memory_range(memory_range);

    self.device_memory().invalidate_range(device_range)
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn invalidate_range_unchecked(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), VulkanError> {
    // Same as `invalidate_range`, except that `memory_range` is not validated.
    let device_range = self.create_memory_range(memory_range);

    self.device_memory().invalidate_range_unchecked(device_range)
}
/// Flushes the host cache for a range of the `ResourceMemory`.
///
/// If the device memory is not [host-coherent], you must call this function after writing to
/// the memory, if the device is going to read the memory. It has no effect if the memory is
/// host-coherent.
///
/// # Safety
///
/// - There must be no operations pending or executing in a device queue, that access any
///   portion of the specified `memory_range`.
///
/// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
/// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
#[inline]
pub unsafe fn flush_range(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), Validated<VulkanError>> {
    self.validate_memory_range(&memory_range)?;

    // Translate the allocation-relative range into one relative to the `DeviceMemory`.
    let device_range = self.create_memory_range(memory_range);

    self.device_memory().flush_range(device_range)
}
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
#[inline]
pub unsafe fn flush_range_unchecked(
    &self,
    memory_range: MappedMemoryRange,
) -> Result<(), VulkanError> {
    // Same as `flush_range`, except that `memory_range` is not validated.
    let device_range = self.create_memory_range(memory_range);

    self.device_memory().flush_range_unchecked(device_range)
}
// Checks that `memory_range` lies entirely within the bounds of this allocation.
fn validate_memory_range(
    &self,
    memory_range: &MappedMemoryRange,
) -> Result<(), Box<ValidationError>> {
    let &MappedMemoryRange {
        offset,
        size,
        _ne: _,
    } = memory_range;

    // The conditions are checked in this order so that `self.size() - offset` can never
    // underflow: the subtraction is only evaluated once `offset <= self.size()` is known.
    if offset > self.size() || size > self.size() - offset {
        return Err(Box::new(ValidationError {
            context: "memory_range".into(),
            problem: "is not contained within the allocation".into(),
            ..Default::default()
        }));
    }

    Ok(())
}
// Converts an allocation-relative `memory_range` into a range relative to the underlying
// `DeviceMemory` block, expanding the end to the non-coherent atom size where allowed.
fn create_memory_range(&self, memory_range: MappedMemoryRange) -> MappedMemoryRange {
    let MappedMemoryRange {
        offset: relative_offset,
        mut size,
        _ne: _,
    } = memory_range;
    let memory = self.device_memory();

    // Translate the offset from being relative to `self` into being relative to the start
    // of the `DeviceMemory` block.
    let offset = relative_offset + self.offset();

    // VUID-VkMappedMemoryRange-size-01390
    if relative_offset + size == self.size() {
        // We can align the end of the range like this without aliasing other allocations,
        // because the memory allocator must ensure that all allocations are aligned to the
        // atom size for non-host-coherent host-visible memory.
        let end = cmp::min(
            align_up(offset + size, memory.atom_size()),
            memory.allocation_size(),
        );
        size = end - offset;
    }

    MappedMemoryRange {
        offset,
        size,
        _ne: crate::NonExhaustive(()),
    }
}
}
impl Drop for ResourceMemory {
    #[inline]
    fn drop(&mut self) {
        // The `DeviceMemory` must be taken out unconditionally: when there is no allocator
        // it is dropped right here, which frees the memory directly.
        // SAFETY: `drop` runs at most once, so the `ManuallyDrop` is taken exactly once.
        let device_memory = unsafe { ManuallyDrop::take(&mut self.device_memory) }.0;

        match &self.allocator {
            Some(allocator) => {
                let allocation = MemoryAlloc {
                    device_memory,
                    suballocation: self.suballocation(),
                    allocation_type: self.allocation_type,
                    allocation_handle: self.allocation_handle,
                };

                // SAFETY: Enforced by the safety contract of
                // [`ResourceMemory::from_allocation`].
                unsafe { allocator.deallocate(allocation) };
            }
            // No allocator: `device_memory` is dropped at the end of this scope, freeing
            // the `DeviceMemory` itself.
            None => {}
        }
    }
}
unsafe impl DeviceOwned for ResourceMemory {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        // Delegate to the underlying `DeviceMemory`, which holds the device handle.
        self.device_memory().device()
    }
}
/// Properties of the memory in a physical device.
#[derive(Clone, Debug)]
#[non_exhaustive]

View File

@ -468,6 +468,7 @@ mod tests {
shader::{ShaderModule, ShaderModuleCreateInfo},
sync::{now, GpuFuture},
};
use std::sync::Arc;
// TODO: test for basic creation
// TODO: test for pipeline layout error
@ -531,9 +532,9 @@ mod tests {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let data_buffer = Buffer::from_data(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()
@ -662,9 +663,9 @@ mod tests {
.unwrap()
};
let memory_allocator = StandardMemoryAllocator::new_default(device.clone());
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let data_buffer = Buffer::from_data(
&memory_allocator,
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::STORAGE_BUFFER,
..Default::default()

View File

@ -665,6 +665,7 @@ mod tests {
SubpassDescription,
},
};
use std::sync::Arc;
#[test]
fn simple_create() {
@ -687,10 +688,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let view = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -768,10 +769,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let view = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8_UNORM,
@ -818,10 +819,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let view = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -868,10 +869,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let view = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -924,10 +925,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let a = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -942,7 +943,7 @@ mod tests {
.unwrap();
let b = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -998,10 +999,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let view = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -1046,10 +1047,10 @@ mod tests {
)
.unwrap();
let memory_allocator = StandardMemoryAllocator::new_default(device);
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device));
let a = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,
@ -1064,7 +1065,7 @@ mod tests {
.unwrap();
let b = ImageView::new_default(
Image::new(
&memory_allocator,
memory_allocator,
ImageCreateInfo {
image_type: ImageType::Dim2d,
format: Format::R8G8B8A8_UNORM,