Rename next and chunk methods of CpuBufferPool for consistency (#1978)

commit 46187e85fb
parent 588d287175
Author: Rua
Date:   2022-09-17 09:51:26 +02:00 (committed by GitHub)
5 changed files with 25 additions and 21 deletions
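
For reference, a minimal before/after sketch of the rename (a sketch only: `device` is an existing `Arc<Device>` assumed to be created elsewhere; the `upload` constructor is the one used in the tests below):

use vulkano::buffer::CpuBufferPool;

// A CPU-side ring-buffer pool of u32 values.
let pool = CpuBufferPool::<u32>::upload(device);

// Before this commit: pool.next(7) and pool.chunk(vec![1, 2, 3]).
// After: `next` is `from_data` (one value), `chunk` is `from_iter` (many).
let one = pool.from_data(7).unwrap();
let many = pool.from_iter(vec![1, 2, 3]).unwrap();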

@@ -328,7 +328,7 @@ fn main() {
     let num_vertices = data.len() as u32;

     // Allocate a new chunk from buffer_pool
-    let buffer = buffer_pool.chunk(data.to_vec()).unwrap();
+    let buffer = buffer_pool.from_iter(data.to_vec()).unwrap();
     let mut builder = AutoCommandBufferBuilder::primary(
         device.clone(),
         queue.queue_family_index(),

@@ -379,12 +379,12 @@ fn main() {
         first_vertex: 0,
         first_instance: 0,
     }];
-    let indirect_buffer = indirect_args_pool.chunk(indirect_commands).unwrap();
+    let indirect_buffer = indirect_args_pool.from_iter(indirect_commands).unwrap();

     // Allocate a GPU buffer to hold this frames vertices. This needs to be large enough to hold
     // the worst case number of vertices generated by the compute shader
     let vertices = vertex_pool
-        .chunk((0..(6 * 16)).map(|_| Vertex { position: [0.0; 2] }))
+        .from_iter((0..(6 * 16)).map(|_| Vertex { position: [0.0; 2] }))
         .unwrap();

     // Pass the two buffers to the compute shader

@@ -298,7 +298,7 @@ fn main() {
             proj: proj.into(),
         };

-        uniform_buffer.next(uniform_data).unwrap()
+        uniform_buffer.from_data(uniform_data).unwrap()
     };

     let layout = pipeline.layout().set_layouts().get(0).unwrap();

@@ -74,7 +74,7 @@ use std::{
 /// for n in 0 .. 25u32 {
 ///     // Each loop grabs a new entry from that ring buffer and stores ` data` in it.
 ///     let data: [f32; 4] = [1.0, 0.5, n as f32 / 24.0, 0.0];
-///     let sub_buffer = buffer.next(data).unwrap();
+///     let sub_buffer = buffer.from_data(data).unwrap();
 ///
 ///     // You can then use `sub_buffer` as if it was an entirely separate buffer.
 ///     AutoCommandBufferBuilder::primary(device.clone(), queue.queue_family_index(), CommandBufferUsage::OneTimeSubmit)
@@ -346,13 +346,16 @@ where
     /// > **Note**: You can think of it like a `Vec`. If you insert an element and the `Vec` is not
     /// > large enough, a new chunk of memory is automatically allocated.
     #[inline]
-    pub fn next(&self, data: T) -> Result<Arc<CpuBufferPoolSubbuffer<T, A>>, DeviceMemoryError> {
+    pub fn from_data(
+        &self,
+        data: T,
+    ) -> Result<Arc<CpuBufferPoolSubbuffer<T, A>>, DeviceMemoryError> {
         Ok(Arc::new(CpuBufferPoolSubbuffer {
             chunk: self.chunk_impl([data].into_iter())?,
         }))
     }

-    /// Grants access to a new subbuffer and puts `data` in it.
+    /// Grants access to a new subbuffer and puts all elements of `iter` in it.
     ///
     /// If no subbuffer is available (because they are still in use by the GPU), a new buffer will
     /// automatically be allocated.
@@ -362,14 +365,15 @@ where
     ///
     /// # Panic
     ///
-    /// Panics if the length of the iterator didn't match the actual number of element.
+    /// Panics if the length of the iterator didn't match the actual number of elements.
     ///
-    pub fn chunk<I>(&self, data: I) -> Result<Arc<CpuBufferPoolChunk<T, A>>, DeviceMemoryError>
+    #[inline]
+    pub fn from_iter<I>(&self, iter: I) -> Result<Arc<CpuBufferPoolChunk<T, A>>, DeviceMemoryError>
     where
         I: IntoIterator<Item = T>,
         I::IntoIter: ExactSizeIterator,
     {
-        self.chunk_impl(data.into_iter()).map(Arc::new)
+        self.chunk_impl(iter.into_iter()).map(Arc::new)
     }

     fn chunk_impl(
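
The `ExactSizeIterator` bound is what makes the documented panic possible: the pool sizes the chunk from the iterator's reported length. A hedged illustration of an iterator that over-reports its length (`BadLen` is a hypothetical type invented here, not part of vulkano):

use vulkano::buffer::CpuBufferPool;

/// Illustrative only: an `ExactSizeIterator` whose reported length is wrong.
struct BadLen(std::ops::Range<u32>);

impl Iterator for BadLen {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        self.0.next()
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        let n = self.0.len() + 1; // over-reports by one element
        (n, Some(n))
    }
}

impl ExactSizeIterator for BadLen {}

// Per the "# Panic" note above, this call is expected to panic: space is
// reserved for 5 elements but the iterator yields only 4.
// let _ = pool.from_iter(BadLen(0..4));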
@@ -910,12 +914,12 @@ mod tests {
         let pool = CpuBufferPool::upload(device);
         assert_eq!(pool.capacity(), 0);

-        pool.next(12).unwrap();
+        pool.from_data(12).unwrap();
         let first_cap = pool.capacity();
         assert!(first_cap >= 1);

         for _ in 0..first_cap + 5 {
-            mem::forget(pool.next(12).unwrap());
+            mem::forget(pool.from_data(12).unwrap());
         }

         assert!(pool.capacity() > first_cap);
@@ -930,7 +934,7 @@ mod tests {
         let mut capacity = None;

         for _ in 0..64 {
-            pool.next(12).unwrap();
+            pool.from_data(12).unwrap();

             let new_cap = pool.capacity();
             assert!(new_cap >= 1);
@@ -948,12 +952,12 @@ mod tests {
         let pool = CpuBufferPool::<u8>::upload(device);
         pool.reserve(5).unwrap();

-        let a = pool.chunk(vec![0, 0]).unwrap();
-        let b = pool.chunk(vec![0, 0]).unwrap();
+        let a = pool.from_iter(vec![0, 0]).unwrap();
+        let b = pool.from_iter(vec![0, 0]).unwrap();
         assert_eq!(b.index, 2);

         drop(a);
-        let c = pool.chunk(vec![0, 0]).unwrap();
+        let c = pool.from_iter(vec![0, 0]).unwrap();
         assert_eq!(c.index, 0);

         assert_eq!(pool.capacity(), 5);
@@ -965,7 +969,7 @@ mod tests {
         let pool = CpuBufferPool::<u8>::upload(device);

-        let _ = pool.chunk(vec![]).unwrap();
-        let _ = pool.chunk(vec![0, 0]).unwrap();
+        let _ = pool.from_iter(vec![]).unwrap();
+        let _ = pool.from_iter(vec![0, 0]).unwrap();
     }
 }

@@ -36,11 +36,11 @@
 //! - A [`CpuBufferPool`](crate::buffer::cpu_pool::CpuBufferPool) is a ring buffer that can be used to
 //!   transfer data between the CPU and the GPU at a high rate.
 //! - A [`CpuAccessibleBuffer`](crate::buffer::cpu_access::CpuAccessibleBuffer) is a simple buffer that
-//!   can be used to prototype. It may be removed from vulkano in the far future.
+//!   can be used to prototype.
 //!
 //! Here is a quick way to choose which buffer to use. Do you often need to read or write
-//! the content of the buffer? If so, use a `CpuBufferPool`. Otherwise, do you need to be able to
-//! modify the content of the buffer after its initialization? If so, use a `DeviceLocalBuffer`.
+//! the content of the buffer? If so, use a `CpuBufferPool`. Otherwise, do you need to have access
+//! to the buffer on the CPU? Then use `CpuAccessibleBuffer`. Otherwise, use a `DeviceLocalBuffer`.
 //!
 //! Another example: if a buffer is under constant access by the GPU but you need to
 //! read its content on the CPU from time to time, it may be a good idea to use a
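
Applied as a decision list, the revised guidance amounts to roughly the following (a sketch: only the `CpuBufferPool` calls are confirmed by this diff, and `device` is an existing `Arc<Device>`):

use vulkano::buffer::CpuBufferPool;

// Content rewritten by the CPU every frame -> `CpuBufferPool`: a ring
// buffer handing out cheap suballocations.
let pool = CpuBufferPool::<[f32; 4]>::upload(device);
let per_frame = pool.from_data([1.0, 0.5, 0.25, 0.0]).unwrap();

// Content needed on the CPU but not rewritten constantly -> `CpuAccessibleBuffer`.
// Content never touched by the CPU after initialization -> `DeviceLocalBuffer`.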