Merged ImmutableBuffer into DeviceLocalBuffer #1934 (#1936)

* Merged `ImmutableBuffer` into `DeviceLocalBuffer` #1934

* Updated documentation
antonino maniscalco 2022-07-31 11:54:46 +02:00 committed by GitHub
parent f74cd9f7bd
commit 77e59002de
7 changed files with 290 additions and 1019 deletions
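For code that used the removed `ImmutableBuffer`, the constructors added to `DeviceLocalBuffer` in this commit keep the same shape. A minimal migration sketch, not part of the commit, assuming an already-created `Arc<Queue>`; the helper name `upload_constants` is illustrative only:

use std::sync::Arc;
use vulkano::buffer::{BufferUsage, DeviceLocalBuffer};
use vulkano::device::Queue;
use vulkano::sync::GpuFuture;

// Hypothetical helper, shown only to illustrate the renamed API.
fn upload_constants(queue: Arc<Queue>) -> Arc<DeviceLocalBuffer<[u32]>> {
    // Before this commit: ImmutableBuffer::from_iter(...).
    let (buffer, upload_future) = DeviceLocalBuffer::from_iter(
        (0..128u32).map(|n| n),
        BufferUsage::all(),
        queue,
    )
    .unwrap();
    // The second return value is the initial-upload future; wait on it (or
    // submit later work after it) before reading the buffer on the GPU.
    upload_future
        .then_signal_fence_and_flush()
        .unwrap()
        .wait(None)
        .unwrap();
    buffer
}

The `(buffer, future)` return pattern matches what `ImmutableBuffer::from_iter` returned, so call sites mostly change only the type name.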

View File

@@ -1,202 +0,0 @@
// Copyright (c) 2020 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// This example demonstrates how to initialize immutable buffers.
use vulkano::{
buffer::{BufferUsage, CpuAccessibleBuffer, ImmutableBuffer},
command_buffer::{AutoCommandBufferBuilder, CommandBufferUsage},
descriptor_set::{PersistentDescriptorSet, WriteDescriptorSet},
device::{
physical::{PhysicalDevice, PhysicalDeviceType},
Device, DeviceCreateInfo, DeviceExtensions, QueueCreateInfo,
},
instance::{Instance, InstanceCreateInfo},
pipeline::{ComputePipeline, Pipeline, PipelineBindPoint},
sync::{self, GpuFuture},
VulkanLibrary,
};
fn main() {
// The most part of this example is exactly the same as `basic-compute-shader`. You should read the
// `basic-compute-shader` example if you haven't done so yet.
let library = VulkanLibrary::new().unwrap();
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant vulkan implementations. (ex. MoltenVK)
enumerate_portability: true,
..Default::default()
},
)
.unwrap();
let device_extensions = DeviceExtensions {
khr_storage_buffer_storage_class: true,
..DeviceExtensions::none()
};
let (physical_device, queue_family) = PhysicalDevice::enumerate(&instance)
.filter(|&p| p.supported_extensions().is_superset_of(&device_extensions))
.filter_map(|p| {
p.queue_families()
.find(|&q| q.supports_compute())
.map(|q| (p, q))
})
.min_by_key(|(p, _)| match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
})
.unwrap();
println!(
"Using device: {} (type: {:?})",
physical_device.properties().device_name,
physical_device.properties().device_type
);
let (device, mut queues) = Device::new(
physical_device,
DeviceCreateInfo {
enabled_extensions: device_extensions,
queue_create_infos: vec![QueueCreateInfo::family(queue_family)],
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
let pipeline = {
mod cs {
vulkano_shaders::shader! {
ty: "compute",
src: "
#version 450
layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in;
layout(set = 0, binding = 0) restrict buffer Data {
uint data[];
} data;
layout(set = 0, binding = 1) readonly restrict buffer ImmutableData {
uint data;
} immutable_data;
void main() {
uint idx = gl_GlobalInvocationID.x;
data.data[idx] *= 12;
data.data[idx] += immutable_data.data;
}"
}
}
let shader = cs::load(device.clone()).unwrap();
ComputePipeline::new(
device.clone(),
shader.entry_point("main").unwrap(),
&(),
None,
|_| {},
)
.unwrap()
};
let data_buffer = {
let data_iter = (0..65536u32).map(|n| n);
CpuAccessibleBuffer::from_iter(device.clone(), BufferUsage::all(), false, data_iter)
.unwrap()
};
// Create immutable buffer and initialize it
let immutable_data_buffer = {
// uninitialized(), uninitialized_array() and raw() return two things: the buffer,
// and a special access that should be used for the initial upload to the buffer.
let (immutable_data_buffer, immutable_data_buffer_init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
// Build command buffer which initialize our buffer.
let mut builder = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
// Initializing a immutable buffer is done by coping data to
// ImmutableBufferInitialization which is returned by a function we use to create buffer.
// We can use copy_buffer(), fill_buffer() and some other functions that copies data to
// buffer also.
builder
.update_buffer(&3u32, immutable_data_buffer_init, 0)
.unwrap();
let command_buffer = builder.build().unwrap();
let future = sync::now(device.clone())
.then_execute(queue.clone(), command_buffer)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
future.wait(None).unwrap();
// Once a buffer is initialized, we no longer need ImmutableBufferInitialization.
// So we return only the buffer.
immutable_data_buffer
};
let layout = pipeline.layout().set_layouts().get(0).unwrap();
let set = PersistentDescriptorSet::new(
layout.clone(),
[
WriteDescriptorSet::buffer(0, data_buffer.clone()),
// Now you can just add immutable buffer like other buffers.
WriteDescriptorSet::buffer(1, immutable_data_buffer.clone()),
],
)
.unwrap();
let mut builder = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::OneTimeSubmit,
)
.unwrap();
builder
.bind_pipeline_compute(pipeline.clone())
.bind_descriptor_sets(
PipelineBindPoint::Compute,
pipeline.layout().clone(),
0,
set.clone(),
)
.dispatch([1024, 1, 1])
.unwrap();
let command_buffer = builder.build().unwrap();
let future = sync::now(device.clone())
.then_execute(queue.clone(), command_buffer)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
future.wait(None).unwrap();
let data_buffer_content = data_buffer.read().unwrap();
for n in 0..65536u32 {
assert_eq!(data_buffer_content[n as usize], n * 12 + 3);
}
}
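After this commit, the flow in the deleted example above maps onto the new `DeviceLocalBuffer::from_data` added in the next file: it stages the value through a `CpuAccessibleBuffer` and records the copy for you, so the explicit `uninitialized()`/`update_buffer()` steps go away. A hedged sketch, assuming the same `queue` as in the example; the helper name is hypothetical:

use std::sync::Arc;
use vulkano::buffer::{BufferUsage, DeviceLocalBuffer};
use vulkano::device::Queue;
use vulkano::sync::GpuFuture;

// Illustrative helper name; not part of the commit.
fn init_immutable_data(queue: Arc<Queue>) -> Arc<DeviceLocalBuffer<u32>> {
    // from_data() builds the staging buffer and the copy command buffer,
    // submits it, and returns the buffer together with the upload future.
    let (buffer, upload_future) =
        DeviceLocalBuffer::from_data(3u32, BufferUsage::all(), queue).unwrap();
    upload_future
        .then_signal_fence_and_flush()
        .unwrap()
        .wait(None)
        .unwrap();
    // The result can then be bound like any other buffer, e.g.
    // WriteDescriptorSet::buffer(1, buffer.clone()).
    buffer
}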

View File

@@ -17,10 +17,14 @@
use super::{
sys::{UnsafeBuffer, UnsafeBufferCreateInfo},
BufferAccess, BufferAccessObject, BufferContents, BufferCreationError, BufferInner,
-BufferUsage, TypedBufferAccess,
+BufferUsage, CpuAccessibleBuffer, TypedBufferAccess,
};
use crate::{
-device::{physical::QueueFamily, Device, DeviceOwned},
+command_buffer::{
+AutoCommandBufferBuilder, CommandBufferBeginError, CommandBufferExecFuture,
+CommandBufferUsage, CopyBufferInfo, PrimaryAutoCommandBuffer, PrimaryCommandBuffer,
+},
+device::{physical::QueueFamily, Device, DeviceOwned, Queue},
memory::{
pool::{
alloc_dedicated_with_exportable_fd, AllocFromRequirementsFilter, AllocLayout,
@@ -29,11 +33,13 @@ use crate::{
DedicatedAllocation, DeviceMemoryAllocationError, DeviceMemoryExportError,
ExternalMemoryHandleType, MemoryPool, MemoryRequirements,
},
-sync::Sharing,
+sync::{NowFuture, Sharing},
DeviceSize,
};
+use core::fmt;
use smallvec::SmallVec;
use std::{
+error,
fs::File,
hash::{Hash, Hasher},
marker::PhantomData,
@@ -164,6 +170,133 @@ where
}
}
// TODO: make this prettier
type DeviceLocalBufferFromBufferFuture =
CommandBufferExecFuture<NowFuture, PrimaryAutoCommandBuffer>;
impl<T> DeviceLocalBuffer<T>
where
T: BufferContents + ?Sized,
{
/// Builds a `DeviceLocalBuffer` that copies its data from another buffer.
///
/// This function returns two objects: the newly-created buffer, and a future representing
/// the initial upload operation. In order to be allowed to use the `DeviceLocalBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
pub fn from_buffer<B>(
source: Arc<B>,
usage: BufferUsage,
queue: Arc<Queue>,
) -> Result<
(Arc<DeviceLocalBuffer<T>>, DeviceLocalBufferFromBufferFuture),
DeviceLocalBufferCreationError,
>
where
B: TypedBufferAccess<Content = T> + 'static,
{
unsafe {
// We automatically set `transfer_dst` to true in order to avoid annoying errors.
let actual_usage = BufferUsage {
transfer_dst: true,
..usage
};
let buffer = DeviceLocalBuffer::raw(
source.device().clone(),
source.size(),
actual_usage,
source.device().active_queue_families(),
)?;
let mut cbb = AutoCommandBufferBuilder::primary(
source.device().clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)?;
cbb.copy_buffer(CopyBufferInfo::buffers(source, buffer.clone()))
.unwrap(); // TODO: return error?
let cb = cbb.build().unwrap(); // TODO: return OomError
let future = match cb.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!(),
};
Ok((buffer, future))
}
}
}
impl<T> DeviceLocalBuffer<T>
where
T: BufferContents,
{
/// Builds a `DeviceLocalBuffer` from some data.
///
/// This function builds a memory-mapped intermediate buffer, writes the data to it, builds a
/// command buffer that copies from this intermediate buffer to the final buffer, and finally
/// submits the command buffer as a future.
///
/// This function returns two objects: the newly-created buffer, and a future representing
/// the initial upload operation. In order to be allowed to use the `DeviceLocalBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
///
/// # Panics
///
/// - Panics if `T` has zero size.
pub fn from_data(
data: T,
usage: BufferUsage,
queue: Arc<Queue>,
) -> Result<
(Arc<DeviceLocalBuffer<T>>, DeviceLocalBufferFromBufferFuture),
DeviceLocalBufferCreationError,
> {
let source = CpuAccessibleBuffer::from_data(
queue.device().clone(),
BufferUsage::transfer_src(),
false,
data,
)?;
DeviceLocalBuffer::from_buffer(source, usage, queue)
}
}
impl<T> DeviceLocalBuffer<[T]>
where
[T]: BufferContents,
{
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `data` is empty.
pub fn from_iter<D>(
data: D,
usage: BufferUsage,
queue: Arc<Queue>,
) -> Result<
(
Arc<DeviceLocalBuffer<[T]>>,
DeviceLocalBufferFromBufferFuture,
),
DeviceLocalBufferCreationError,
>
where
D: IntoIterator<Item = T>,
D::IntoIter: ExactSizeIterator,
{
let source = CpuAccessibleBuffer::from_iter(
queue.device().clone(),
BufferUsage::transfer_src(),
false,
data,
)?;
DeviceLocalBuffer::from_buffer(source, usage, queue)
}
}
impl<T> DeviceLocalBuffer<[T]>
where
[T]: BufferContents,
@@ -436,3 +569,133 @@ where
self.size().hash(state);
}
}
#[derive(Clone, Debug)]
pub enum DeviceLocalBufferCreationError {
DeviceMemoryAllocationError(DeviceMemoryAllocationError),
CommandBufferBeginError(CommandBufferBeginError),
}
impl error::Error for DeviceLocalBufferCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
Self::DeviceMemoryAllocationError(err) => Some(err),
Self::CommandBufferBeginError(err) => Some(err),
}
}
}
impl fmt::Display for DeviceLocalBufferCreationError {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::DeviceMemoryAllocationError(err) => err.fmt(f),
Self::CommandBufferBeginError(err) => err.fmt(f),
}
}
}
impl From<DeviceMemoryAllocationError> for DeviceLocalBufferCreationError {
#[inline]
fn from(e: DeviceMemoryAllocationError) -> Self {
Self::DeviceMemoryAllocationError(e)
}
}
impl From<CommandBufferBeginError> for DeviceLocalBufferCreationError {
#[inline]
fn from(e: CommandBufferBeginError) -> Self {
Self::CommandBufferBeginError(e)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::sync::GpuFuture;
#[test]
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) =
DeviceLocalBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
let destination =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(buffer, destination.clone()))
.unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
assert_eq!(*destination_content, 12);
}
#[test]
fn from_iter_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = DeviceLocalBuffer::from_iter(
(0..512u32).map(|n| n * 2),
BufferUsage::all(),
queue.clone(),
)
.unwrap();
let destination = CpuAccessibleBuffer::from_iter(
device.clone(),
BufferUsage::all(),
false,
(0..512).map(|_| 0u32),
)
.unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(buffer, destination.clone()))
.unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
for (n, &v) in destination_content.iter().enumerate() {
assert_eq!(n * 2, v as usize);
}
}
#[test]
#[allow(unused)]
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();
assert_should_panic!({
DeviceLocalBuffer::from_data((), BufferUsage::all(), queue.clone()).unwrap();
});
}
// TODO: write tons of tests that try to exploit loopholes
// this isn't possible yet because checks aren't correctly implemented yet
}
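The doc comments above require callers either to wait on the returned upload future or to submit their own work after it; the tests wait via `then_signal_fence_and_flush()`. A hedged sketch of the chaining variant, assuming an existing `queue` and a prebuilt `command_buffer` for the later GPU work (names illustrative):

use std::sync::Arc;
use vulkano::buffer::{BufferUsage, DeviceLocalBuffer};
use vulkano::command_buffer::PrimaryAutoCommandBuffer;
use vulkano::device::Queue;
use vulkano::sync::GpuFuture;

// Illustrative helper; `command_buffer` stands in for whatever GPU work
// should run once the buffer is ready.
fn upload_then_run(queue: Arc<Queue>, command_buffer: PrimaryAutoCommandBuffer) {
    let (_buffer, upload_future) =
        DeviceLocalBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
    // Submitting after the upload future orders the later work after the copy,
    // so no explicit wait is needed before queuing dependent commands.
    upload_future
        .then_execute(queue, command_buffer)
        .unwrap()
        .then_signal_fence_and_flush()
        .unwrap()
        .wait(None)
        .unwrap();
}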

View File

@@ -1,785 +0,0 @@
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Buffer that is written once then read for as long as it is alive.
//!
//! Use this buffer when you have data that you never modify.
//!
//! Only the first ever command buffer that uses this buffer can write to it (for example by
//! copying from another buffer). Any subsequent command buffer **must** only read from the buffer,
//! or a panic will happen.
//!
//! The buffer will be stored in device-local memory if possible
//!
use super::{
sys::UnsafeBuffer, BufferAccess, BufferAccessObject, BufferContents, BufferInner, BufferUsage,
CpuAccessibleBuffer,
};
use crate::{
buffer::{sys::UnsafeBufferCreateInfo, BufferCreationError, TypedBufferAccess},
command_buffer::{
AutoCommandBufferBuilder, CommandBufferBeginError, CommandBufferExecFuture,
CommandBufferUsage, CopyBufferInfo, PrimaryAutoCommandBuffer, PrimaryCommandBuffer,
},
device::{physical::QueueFamily, Device, DeviceOwned, Queue},
memory::{
pool::{
AllocFromRequirementsFilter, AllocLayout, MappingRequirement, MemoryPoolAlloc,
PotentialDedicatedAllocation, StdMemoryPoolAlloc,
},
DedicatedAllocation, DeviceMemoryAllocationError, MemoryPool,
},
sync::{NowFuture, Sharing},
DeviceSize, OomError,
};
use smallvec::SmallVec;
use std::{
error, fmt,
hash::{Hash, Hasher},
marker::PhantomData,
mem::size_of,
sync::Arc,
};
/// Buffer that is written once then read for as long as it is alive.
#[derive(Debug)]
pub struct ImmutableBuffer<T, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>>
where
T: BufferContents + ?Sized,
{
// Inner content.
inner: Arc<UnsafeBuffer>,
// Memory allocated for the buffer.
memory: A,
// Queue families allowed to access this buffer.
queue_families: SmallVec<[u32; 4]>,
// Necessary to have the appropriate template parameter.
marker: PhantomData<Box<T>>,
}
// TODO: make this prettier
type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, PrimaryAutoCommandBuffer>;
impl<T> ImmutableBuffer<T>
where
T: BufferContents + ?Sized,
{
/// Builds an `ImmutableBuffer` that copies its data from another buffer.
///
/// This function returns two objects: the newly-created buffer, and a future representing
/// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
pub fn from_buffer<B>(
source: Arc<B>,
usage: BufferUsage,
queue: Arc<Queue>,
) -> Result<
(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture),
ImmutableBufferCreationError,
>
where
B: TypedBufferAccess<Content = T> + 'static,
{
unsafe {
// We automatically set `transfer_dst` to true in order to avoid annoying errors.
let actual_usage = BufferUsage {
transfer_dst: true,
..usage
};
let (buffer, init) = ImmutableBuffer::raw(
source.device().clone(),
source.size(),
actual_usage,
source.device().active_queue_families(),
)?;
let mut cbb = AutoCommandBufferBuilder::primary(
source.device().clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)?;
cbb.copy_buffer(CopyBufferInfo::buffers(source, init))
.unwrap(); // TODO: return error?
let cb = cbb.build().unwrap(); // TODO: return OomError
let future = match cb.execute(queue) {
Ok(f) => f,
Err(_) => unreachable!(),
};
Ok((buffer, future))
}
}
}
impl<T> ImmutableBuffer<T>
where
T: BufferContents,
{
/// Builds an `ImmutableBuffer` from some data.
///
/// This function builds a memory-mapped intermediate buffer, writes the data to it, builds a
/// command buffer that copies from this intermediate buffer to the final buffer, and finally
/// submits the command buffer as a future.
///
/// This function returns two objects: the newly-created buffer, and a future representing
/// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
/// either submit your operation after this future, or execute this future and wait for it to
/// be finished before submitting your own operation.
///
/// # Panics
///
/// - Panics if `T` has zero size.
pub fn from_data(
data: T,
usage: BufferUsage,
queue: Arc<Queue>,
) -> Result<
(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture),
ImmutableBufferCreationError,
> {
let source = CpuAccessibleBuffer::from_data(
queue.device().clone(),
BufferUsage::transfer_src(),
false,
data,
)?;
ImmutableBuffer::from_buffer(source, usage, queue)
}
/// Builds a new buffer with uninitialized data. Only allowed for sized data.
///
/// Returns two things: the buffer, and a special access that should be used for the initial
/// upload to the buffer.
///
/// You will get an error if you try to use the buffer before using the initial upload access.
/// However this function doesn't check whether you actually used this initial upload to fill
/// the buffer like you're supposed to do.
///
/// You will also get an error if you try to get exclusive access to the final buffer.
///
/// # Safety
///
/// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
/// data, otherwise the content is undefined.
///
/// # Panics
///
/// - Panics if `T` has zero size.
#[inline]
pub unsafe fn uninitialized(
device: Arc<Device>,
usage: BufferUsage,
) -> Result<
(
Arc<ImmutableBuffer<T>>,
Arc<ImmutableBufferInitialization<T>>,
),
ImmutableBufferCreationError,
> {
ImmutableBuffer::raw(
device.clone(),
size_of::<T>() as DeviceSize,
usage,
device.active_queue_families(),
)
}
}
impl<T> ImmutableBuffer<[T]>
where
[T]: BufferContents,
{
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `data` is empty.
pub fn from_iter<D>(
data: D,
usage: BufferUsage,
queue: Arc<Queue>,
) -> Result<
(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture),
ImmutableBufferCreationError,
>
where
D: IntoIterator<Item = T>,
D::IntoIter: ExactSizeIterator,
{
let source = CpuAccessibleBuffer::from_iter(
queue.device().clone(),
BufferUsage::transfer_src(),
false,
data,
)?;
ImmutableBuffer::from_buffer(source, usage, queue)
}
/// Builds a new buffer with uninitialized data. Can be used for arrays.
///
/// Returns two things: the buffer, and a special access that should be used for the initial
/// upload to the buffer.
///
/// You will get an error if you try to use the buffer before using the initial upload access.
/// However this function doesn't check whether you actually used this initial upload to fill
/// the buffer like you're supposed to do.
///
/// You will also get an error if you try to get exclusive access to the final buffer.
///
/// # Safety
///
/// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
/// data, otherwise the content is undefined.
///
/// # Panics
///
/// - Panics if `T` has zero size.
/// - Panics if `len` is zero.
#[inline]
pub unsafe fn uninitialized_array(
device: Arc<Device>,
len: DeviceSize,
usage: BufferUsage,
) -> Result<
(
Arc<ImmutableBuffer<[T]>>,
Arc<ImmutableBufferInitialization<[T]>>,
),
ImmutableBufferCreationError,
> {
ImmutableBuffer::raw(
device.clone(),
len * size_of::<T>() as DeviceSize,
usage,
device.active_queue_families(),
)
}
}
impl<T> ImmutableBuffer<T>
where
T: BufferContents + ?Sized,
{
/// Builds a new buffer without checking the size and granting free access for the initial
/// upload.
///
/// Returns two things: the buffer, and a special access that should be used for the initial
/// upload to the buffer.
/// You will get an error if you try to use the buffer before using the initial upload access.
/// However this function doesn't check whether you used this initial upload to fill the buffer.
/// You will also get an error if you try to get exclusive access to the final buffer.
///
/// # Safety
///
/// - You must ensure that the size that you pass is correct for `T`.
/// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
/// data.
///
/// # Panics
///
/// - Panics if `size` is zero.
#[inline]
pub unsafe fn raw<'a, I>(
device: Arc<Device>,
size: DeviceSize,
usage: BufferUsage,
queue_families: I,
) -> Result<
(
Arc<ImmutableBuffer<T>>,
Arc<ImmutableBufferInitialization<T>>,
),
ImmutableBufferCreationError,
>
where
I: IntoIterator<Item = QueueFamily<'a>>,
{
let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
ImmutableBuffer::raw_impl(device, size, usage, queue_families)
}
// Internal implementation of `raw`. This is separated from `raw` so that it doesn't need to be
// inlined.
unsafe fn raw_impl(
device: Arc<Device>,
size: DeviceSize,
usage: BufferUsage,
queue_families: SmallVec<[u32; 4]>,
) -> Result<
(
Arc<ImmutableBuffer<T>>,
Arc<ImmutableBufferInitialization<T>>,
),
ImmutableBufferCreationError,
> {
let buffer = match UnsafeBuffer::new(
device.clone(),
UnsafeBufferCreateInfo {
sharing: if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.clone())
} else {
Sharing::Exclusive
},
size,
usage,
..Default::default()
},
) {
Ok(b) => b,
Err(BufferCreationError::AllocError(err)) => return Err(err.into()),
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
};
let mem_reqs = buffer.memory_requirements();
let mem = MemoryPool::alloc_from_requirements(
&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::DoNotMap,
Some(DedicatedAllocation::Buffer(&buffer)),
|t| {
if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
}
},
)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
buffer.bind_memory(mem.memory(), mem.offset())?;
let final_buf = Arc::new(ImmutableBuffer {
inner: buffer,
memory: mem,
queue_families: queue_families,
marker: PhantomData,
});
let initialization = Arc::new(ImmutableBufferInitialization {
buffer: final_buf.clone(),
});
Ok((final_buf, initialization))
}
}
impl<T, A> ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
{
/// Returns the device used to create this buffer.
#[inline]
pub fn device(&self) -> &Arc<Device> {
self.inner.device()
}
/// Returns the queue families this buffer can be used on.
// TODO: use a custom iterator
#[inline]
pub fn queue_families(&self) -> Vec<QueueFamily> {
self.queue_families
.iter()
.map(|&num| {
self.device()
.physical_device()
.queue_family_by_id(num)
.unwrap()
})
.collect()
}
}
unsafe impl<T, A> BufferAccess for ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
#[inline]
fn inner(&self) -> BufferInner {
BufferInner {
buffer: &self.inner,
offset: 0,
}
}
#[inline]
fn size(&self) -> DeviceSize {
self.inner.size()
}
}
impl<T, A> BufferAccessObject for Arc<ImmutableBuffer<T, A>>
where
T: BufferContents + ?Sized,
A: Send + Sync + 'static,
{
#[inline]
fn as_buffer_access_object(&self) -> Arc<dyn BufferAccess> {
self.clone()
}
}
unsafe impl<T, A> TypedBufferAccess for ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
type Content = T;
}
unsafe impl<T, A> DeviceOwned for ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
{
#[inline]
fn device(&self) -> &Arc<Device> {
self.inner.device()
}
}
impl<T, A> PartialEq for ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
self.inner() == other.inner() && self.size() == other.size()
}
}
impl<T, A> Eq for ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
}
impl<T, A> Hash for ImmutableBuffer<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.inner().hash(state);
self.size().hash(state);
}
}
/// Access to the immutable buffer that can be used for the initial upload.
#[derive(Debug)]
pub struct ImmutableBufferInitialization<T, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>>
where
T: BufferContents + ?Sized,
{
buffer: Arc<ImmutableBuffer<T, A>>,
}
unsafe impl<T, A> BufferAccess for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
#[inline]
fn inner(&self) -> BufferInner {
self.buffer.inner()
}
#[inline]
fn size(&self) -> DeviceSize {
self.buffer.size()
}
}
impl<T, A> BufferAccessObject for Arc<ImmutableBufferInitialization<T, A>>
where
T: BufferContents + ?Sized,
A: Send + Sync + 'static,
{
#[inline]
fn as_buffer_access_object(&self) -> Arc<dyn BufferAccess> {
self.clone()
}
}
unsafe impl<T, A> TypedBufferAccess for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
type Content = T;
}
unsafe impl<T, A> DeviceOwned for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
{
#[inline]
fn device(&self) -> &Arc<Device> {
self.buffer.inner.device()
}
}
impl<T, A> Clone for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
{
#[inline]
fn clone(&self) -> ImmutableBufferInitialization<T, A> {
ImmutableBufferInitialization {
buffer: self.buffer.clone(),
}
}
}
impl<T, A> PartialEq for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
#[inline]
fn eq(&self, other: &Self) -> bool {
self.inner() == other.inner() && self.size() == other.size()
}
}
impl<T, A> Eq for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
}
impl<T, A> Hash for ImmutableBufferInitialization<T, A>
where
T: BufferContents + ?Sized,
A: Send + Sync,
{
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.inner().hash(state);
self.size().hash(state);
}
}
#[derive(Clone, Debug)]
pub enum ImmutableBufferCreationError {
DeviceMemoryAllocationError(DeviceMemoryAllocationError),
CommandBufferBeginError(CommandBufferBeginError),
}
impl error::Error for ImmutableBufferCreationError {
#[inline]
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
Self::DeviceMemoryAllocationError(err) => Some(err),
Self::CommandBufferBeginError(err) => Some(err),
}
}
}
impl fmt::Display for ImmutableBufferCreationError {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::DeviceMemoryAllocationError(err) => err.fmt(f),
Self::CommandBufferBeginError(err) => err.fmt(f),
}
}
}
impl From<DeviceMemoryAllocationError> for ImmutableBufferCreationError {
#[inline]
fn from(err: DeviceMemoryAllocationError) -> Self {
Self::DeviceMemoryAllocationError(err)
}
}
impl From<OomError> for ImmutableBufferCreationError {
#[inline]
fn from(err: OomError) -> Self {
Self::DeviceMemoryAllocationError(err.into())
}
}
impl From<CommandBufferBeginError> for ImmutableBufferCreationError {
#[inline]
fn from(err: CommandBufferBeginError) -> Self {
Self::CommandBufferBeginError(err)
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::sync::GpuFuture;
#[test]
fn from_data_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) =
ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
let destination =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(buffer, destination.clone()))
.unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
assert_eq!(*destination_content, 12);
}
#[test]
fn from_iter_working() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, _) = ImmutableBuffer::from_iter(
(0..512u32).map(|n| n * 2),
BufferUsage::all(),
queue.clone(),
)
.unwrap();
let destination = CpuAccessibleBuffer::from_iter(
device.clone(),
BufferUsage::all(),
false,
(0..512).map(|_| 0u32),
)
.unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(buffer, destination.clone()))
.unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
let destination_content = destination.read().unwrap();
for (n, &v) in destination_content.iter().enumerate() {
assert_eq!(n * 2, v as usize);
}
}
#[test]
fn init_then_read_same_cb() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(source.clone(), init))
.unwrap()
.copy_buffer(CopyBufferInfo::buffers(buffer, source.clone()))
.unwrap();
let _ = cbb
.build()
.unwrap()
.execute(queue.clone())
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
#[ignore] // TODO: doesn't work because the submit sync layer isn't properly implemented
fn init_then_read_same_future() {
let (device, queue) = gfx_dev_and_queue!();
let (buffer, init) = unsafe {
ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
};
let source =
CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(source.clone(), init))
.unwrap();
let cb1 = cbb.build().unwrap();
let mut cbb = AutoCommandBufferBuilder::primary(
device.clone(),
queue.family(),
CommandBufferUsage::MultipleSubmit,
)
.unwrap();
cbb.copy_buffer(CopyBufferInfo::buffers(buffer, source.clone()))
.unwrap();
let cb2 = cbb.build().unwrap();
let _ = cb1
.execute(queue.clone())
.unwrap()
.then_execute(queue.clone(), cb2)
.unwrap()
.then_signal_fence_and_flush()
.unwrap();
}
#[test]
#[allow(unused)]
fn create_buffer_zero_size_data() {
let (device, queue) = gfx_dev_and_queue!();
assert_should_panic!({
ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone()).unwrap();
});
}
// TODO: write tons of tests that try to exploit loopholes
// this isn't possible yet because checks aren't correctly implemented yet
}
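The removed `ImmutableBuffer::from_buffer` above has a direct counterpart in the `DeviceLocalBuffer::from_buffer` added earlier in this commit. A hedged sketch of uploading from an existing staging buffer; `staging`, `queue`, and the helper name are assumptions for illustration:

use std::sync::Arc;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer};
use vulkano::device::Queue;
use vulkano::sync::GpuFuture;

// Illustrative helper; not part of the commit.
fn upload_from_staging(
    staging: Arc<CpuAccessibleBuffer<[u32]>>,
    queue: Arc<Queue>,
) -> Arc<DeviceLocalBuffer<[u32]>> {
    // transfer_dst is added automatically by from_buffer(), just as it was
    // in the removed ImmutableBuffer::from_buffer().
    let (buffer, upload_future) =
        DeviceLocalBuffer::from_buffer(staging, BufferUsage::all(), queue).unwrap();
    upload_future
        .then_signal_fence_and_flush()
        .unwrap()
        .wait(None)
        .unwrap();
    buffer
}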

View File

@@ -33,10 +33,6 @@
//! usually located in video memory and whose content can't be directly accessed by your
//! application. Accessing this buffer from the GPU is generally faster compared to accessing a
//! CPU-accessible buffer.
-//! - An [`ImmutableBuffer`](crate::buffer::immutable::ImmutableBuffer) designates a buffer in video
-//! memory and whose content can only be written at creation. Compared to `DeviceLocalBuffer`,
-//! this buffer requires less CPU processing because we don't need to keep track of the reads
-//! and writes.
//! - A [`CpuBufferPool`](crate::buffer::cpu_pool::CpuBufferPool) is a ring buffer that can be used to
//! transfer data between the CPU and the GPU at a high rate.
//! - A [`CpuAccessibleBuffer`](crate::buffer::cpu_access::CpuAccessibleBuffer) is a simple buffer that
@@ -45,12 +41,7 @@
//! Here is a quick way to choose which buffer to use. Do you often need to read or write
//! the content of the buffer? If so, use a `CpuBufferPool`. Otherwise, do you need to be able to
//! modify the content of the buffer after its initialization? If so, use a `DeviceLocalBuffer`.
-//! If no to both questions, use an `ImmutableBuffer`.
//!
-//! When deciding how your buffer is going to be used, don't forget that sometimes the best
-//! solution is to manipulate multiple buffers instead. For example if you need to update a buffer's
-//! content only from time to time, it may be a good idea to simply recreate a new `ImmutableBuffer`
-//! every time.
//! Another example: if a buffer is under constant access by the GPU but you need to
//! read its content on the CPU from time to time, it may be a good idea to use a
//! `DeviceLocalBuffer` as the main buffer and a `CpuBufferPool` for when you need to read it.
@@ -87,7 +78,6 @@ pub use self::{
cpu_access::CpuAccessibleBuffer,
cpu_pool::CpuBufferPool,
device_local::DeviceLocalBuffer,
-immutable::ImmutableBuffer,
slice::BufferSlice,
sys::{BufferCreationError, SparseLevel},
traits::{
@@ -108,7 +98,6 @@ use std::mem::size_of;
pub mod cpu_access;
pub mod cpu_pool;
pub mod device_local;
-pub mod immutable;
pub mod sys;
pub mod view;

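With the `ImmutableBuffer` bullet gone, the guidance above reduces to: keep GPU-resident data (including the former write-once case) in a `DeviceLocalBuffer`, and reach for a CPU-side buffer when the host needs direct access. A hedged illustration, assuming an existing `device` and `queue`; the helper name is made up:

use std::sync::Arc;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer};
use vulkano::device::{Device, Queue};

// Illustrative only; not part of the commit.
fn choose_buffers(device: Arc<Device>, queue: Arc<Queue>) {
    // Data that lives on the GPU (and may later be rewritten by GPU commands):
    // DeviceLocalBuffer, which after this commit also covers the write-once case.
    // The upload future is ignored here for brevity; see the earlier sketches
    // for how to synchronize with it.
    let (_gpu_buffer, _upload) =
        DeviceLocalBuffer::<[u32]>::from_iter(0..1024u32, BufferUsage::all(), queue).unwrap();

    // Data the host reads or writes directly: a CPU-accessible buffer.
    let _cpu_buffer =
        CpuAccessibleBuffer::from_iter(device, BufferUsage::all(), false, 0..1024u32).unwrap();
}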
View File

@@ -141,10 +141,10 @@ impl<T: ?Sized, B> BufferSlice<T, B> {
/// ```
/// # use std::sync::Arc;
/// # use vulkano::buffer::BufferSlice;
-/// # use vulkano::buffer::immutable::ImmutableBuffer;
+/// # use vulkano::buffer::DeviceLocalBuffer;
/// # struct VertexImpl;
-/// let blob_slice: Arc<BufferSlice<[u8], Arc<ImmutableBuffer<[u8]>>>> = return;
-/// let vertex_slice: Arc<BufferSlice<[VertexImpl], Arc<ImmutableBuffer<[u8]>>>> = unsafe {
+/// let blob_slice: Arc<BufferSlice<[u8], Arc<DeviceLocalBuffer<[u8]>>>> = return;
+/// let vertex_slice: Arc<BufferSlice<[VertexImpl], Arc<DeviceLocalBuffer<[u8]>>>> = unsafe {
/// blob_slice.reinterpret::<[VertexImpl]>()
/// };
/// ```

View File

@@ -19,7 +19,7 @@
//!
//! ```
//! # use std::sync::Arc;
-//! use vulkano::buffer::immutable::ImmutableBuffer;
+//! use vulkano::buffer::DeviceLocalBuffer;
//! use vulkano::buffer::BufferUsage;
//! use vulkano::buffer::view::{BufferView, BufferViewCreateInfo};
//! use vulkano::format::Format;
@@ -31,7 +31,7 @@
//! .. BufferUsage::none()
//! };
//!
-//! let (buffer, _future) = ImmutableBuffer::<[u32]>::from_iter((0..128).map(|n| n), usage,
+//! let (buffer, _future) = DeviceLocalBuffer::<[u32]>::from_iter((0..128).map(|n| n), usage,
//! queue.clone()).unwrap();
//! let _view = BufferView::new(
//! buffer,
@@ -462,9 +462,8 @@ impl Hash for dyn BufferViewAbstract {
#[cfg(test)]
mod tests {
-use crate::buffer::immutable::ImmutableBuffer;
use crate::buffer::view::{BufferView, BufferViewCreateInfo, BufferViewCreationError};
-use crate::buffer::BufferUsage;
+use crate::buffer::{BufferUsage, DeviceLocalBuffer};
use crate::format::Format;
#[test]
@@ -477,9 +476,12 @@ mod tests {
..BufferUsage::none()
};
-let (buffer, _) =
-ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), usage, queue.clone())
-.unwrap();
+let (buffer, _) = DeviceLocalBuffer::<[[u8; 4]]>::from_iter(
+(0..128).map(|_| [0; 4]),
+usage,
+queue.clone(),
+)
+.unwrap();
let view = BufferView::new(
buffer,
BufferViewCreateInfo {
@@ -500,9 +502,12 @@ mod tests {
..BufferUsage::none()
};
-let (buffer, _) =
-ImmutableBuffer::<[[u8; 4]]>::from_iter((0..128).map(|_| [0; 4]), usage, queue.clone())
-.unwrap();
+let (buffer, _) = DeviceLocalBuffer::<[[u8; 4]]>::from_iter(
+(0..128).map(|_| [0; 4]),
+usage,
+queue.clone(),
+)
+.unwrap();
BufferView::new(
buffer,
BufferViewCreateInfo {
@@ -524,7 +529,8 @@ mod tests {
};
let (buffer, _) =
-ImmutableBuffer::<[u32]>::from_iter((0..128).map(|_| 0), usage, queue.clone()).unwrap();
+DeviceLocalBuffer::<[u32]>::from_iter((0..128).map(|_| 0), usage, queue.clone())
+.unwrap();
BufferView::new(
buffer,
BufferViewCreateInfo {
@@ -540,7 +546,7 @@ mod tests {
// `VK_FORMAT_R8G8B8A8_UNORM` guaranteed to be a supported format
let (device, queue) = gfx_dev_and_queue!();
-let (buffer, _) = ImmutableBuffer::<[[u8; 4]]>::from_iter(
+let (buffer, _) = DeviceLocalBuffer::<[[u8; 4]]>::from_iter(
(0..128).map(|_| [0; 4]),
BufferUsage::none(),
queue.clone(),
@@ -569,7 +575,7 @@ mod tests {
..BufferUsage::none()
};
-let (buffer, _) = ImmutableBuffer::<[[f64; 4]]>::from_iter(
+let (buffer, _) = DeviceLocalBuffer::<[[f64; 4]]>::from_iter(
(0..128).map(|_| [0.0; 4]),
usage,
queue.clone(),

View File

@@ -523,7 +523,7 @@ impl std::fmt::Debug for dyn Command {
mod tests {
use super::*;
use crate::{
-buffer::{BufferUsage, CpuAccessibleBuffer, ImmutableBuffer},
+buffer::{BufferUsage, CpuAccessibleBuffer, DeviceLocalBuffer},
command_buffer::{
pool::{CommandPool, CommandPoolBuilderAlloc},
sys::CommandBufferBeginInfo,
@@ -571,7 +571,7 @@ mod tests {
// Create a tiny test buffer
let (buf, future) =
-ImmutableBuffer::from_data(0u32, BufferUsage::transfer_dst(), queue.clone())
+DeviceLocalBuffer::from_data(0u32, BufferUsage::transfer_dst(), queue.clone())
.unwrap();
future
.then_signal_fence_and_flush()