Rework the CommandPool API

Pierre Krieger 2017-05-07 14:19:50 +02:00
parent 92b75b6d9f
commit a75671181d
6 changed files with 250 additions and 242 deletions


@ -33,6 +33,13 @@ use OomError;
type Cb<P> = cb::DeviceCheckLayer<cb::QueueTyCheckLayer<cb::ContextCheckLayer<cb::StateCacheLayer<cb::SubmitSyncBuilderLayer<cb::AutoPipelineBarriersLayer<cb::AbstractStorageLayer<cb::UnsafeCommandBufferBuilder<P>>>>>>>>;
///
///
/// Note that command buffers allocated from the default command pool (`Arc<StandardCommandPool>`)
/// don't implement the `Send` and `Sync` traits. If you use this pool, then the
/// `AutoCommandBufferBuilder` will not implement `Send` and `Sync` either. Once a command buffer
/// is built, however, it *does* implement `Send` and `Sync`.
///
pub struct AutoCommandBufferBuilder<P = Arc<StandardCommandPool>> where P: CommandPool {
inner: Cb<P>
}
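The Send/Sync split described in the new doc comment can be illustrated with a small standalone sketch (hypothetical types, not part of this commit): the builder holds a non-`Send` handle, standing in for a thread-local pool, while the built object only keeps thread-safe data.

    use std::rc::Rc;
    use std::sync::Arc;
    use std::thread;

    // Rc is !Send + !Sync, standing in for a handle to a thread-local pool.
    struct Builder {
        _pool: Rc<()>,
        commands: Vec<String>,
    }

    // The built object keeps only Send + Sync data.
    struct Built {
        commands: Arc<Vec<String>>,
    }

    impl Builder {
        fn build(self) -> Built {
            Built { commands: Arc::new(self.commands) }
        }
    }

    fn main() {
        let builder = Builder { _pool: Rc::new(()), commands: vec!["draw".into()] };
        let built = builder.build();
        // `built` can cross threads; `builder` could not have.
        thread::spawn(move || println!("{} commands", built.commands.len()))
            .join().unwrap();
    }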
@ -44,7 +51,7 @@ impl AutoCommandBufferBuilder<Arc<StandardCommandPool>> {
let pool = Device::standard_command_pool(&device, queue_family);
let cmd = unsafe {
let c = try!(cb::UnsafeCommandBufferBuilder::new(pool, cb::Kind::primary(), cb::Flags::SimultaneousUse /* TODO: */));
let c = try!(cb::UnsafeCommandBufferBuilder::new(&pool, cb::Kind::primary(), cb::Flags::SimultaneousUse /* TODO: */));
let c = cb::AbstractStorageLayer::new(c);
let c = cb::AutoPipelineBarriersLayer::new(c);
let c = cb::SubmitSyncBuilderLayer::new(c);


@ -16,8 +16,9 @@ use buffer::BufferAccess;
use command_buffer::CommandBuffer;
use command_buffer::CommandBufferBuilder;
use command_buffer::cb::CommandBufferBuild;
use command_buffer::pool::AllocatedCommandBuffer;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::CommandPoolAlloc;
use device::Device;
use device::DeviceOwned;
use device::Queue;
@ -96,20 +97,21 @@ pub enum Flags {
///
/// When you are finished adding commands, you can use the `CommandBufferBuild` trait to turn this
/// builder into an `UnsafeCommandBuffer`.
// TODO: change P parameter to be a CommandPoolBuilderAlloc
pub struct UnsafeCommandBufferBuilder<P> where P: CommandPool {
// The Vulkan command buffer. Will be 0 if `build()` has been called.
cmd: vk::CommandBuffer,
// The command buffer obtained from the pool. Contains `None` if `build()` has been called.
cmd: Option<P::Builder>,
// Device that owns the command buffer.
// TODO: necessary?
device: Arc<Device>,
// Pool that owns the command buffer.
pool: Option<P>,
// Flags that were used at creation.
// TODO: necessary?
flags: Flags,
// True if we are a secondary command buffer.
// TODO: necessary?
secondary_cb: bool,
}
@ -127,7 +129,7 @@ impl<P> UnsafeCommandBufferBuilder<P> where P: CommandPool {
///
/// > **Note**: Some checks are still made with `debug_assert!`. Do not expect to be able to
/// > submit invalid commands.
pub unsafe fn new<R, F>(pool: P, kind: Kind<R, F>, flags: Flags)
pub unsafe fn new<R, F>(pool: &P, kind: Kind<R, F>, flags: Flags)
-> Result<UnsafeCommandBufferBuilder<P>, OomError>
where R: RenderPassAbstract, F: FramebufferAbstract
{
@ -136,16 +138,9 @@ impl<P> UnsafeCommandBufferBuilder<P> where P: CommandPool {
Kind::Secondary | Kind::SecondaryRenderPass { .. } => true,
};
let cmd = try!(pool.alloc(secondary, 1)).next().unwrap();
match UnsafeCommandBufferBuilder::already_allocated(pool, cmd, kind, flags) {
Ok(cmd) => Ok(cmd),
Err(err) => {
// FIXME: uncomment this and solve the fact that `pool` has been moved
//unsafe { pool.free(secondary, Some(cmd.into()).into_iter()) };
Err(err)
},
}
let cmd = try!(pool.alloc(secondary, 1)).next().expect("Requested one command buffer from \
the command pool, but got zero.");
UnsafeCommandBufferBuilder::already_allocated(cmd, kind, flags)
}
/// Creates a new command buffer builder from an already-allocated command buffer.
@ -154,17 +149,15 @@ impl<P> UnsafeCommandBufferBuilder<P> where P: CommandPool {
///
/// See the `new` method.
///
/// The allocated command buffer must belong to the pool and must not be used anywhere else
/// in the code for the duration of this command buffer.
/// The kind must match how the command buffer was allocated.
///
pub unsafe fn already_allocated<R, F>(pool: P, cmd: AllocatedCommandBuffer,
kind: Kind<R, F>, flags: Flags)
pub unsafe fn already_allocated<R, F>(alloc: P::Builder, kind: Kind<R, F>, flags: Flags)
-> Result<UnsafeCommandBufferBuilder<P>, OomError>
where R: RenderPassAbstract, F: FramebufferAbstract
{
let device = pool.device().clone();
let device = alloc.device().clone();
let vk = device.pointers();
let cmd = cmd.internal_object();
let cmd = alloc.inner().internal_object();
let vk_flags = {
let a = match flags {
@ -218,9 +211,8 @@ impl<P> UnsafeCommandBufferBuilder<P> where P: CommandPool {
try!(check_errors(vk.BeginCommandBuffer(cmd, &infos)));
Ok(UnsafeCommandBufferBuilder {
cmd: Some(alloc),
device: device.clone(),
pool: Some(pool),
cmd: cmd,
flags: flags,
secondary_cb: match kind {
Kind::Primary => false,
@ -240,7 +232,7 @@ unsafe impl<P> DeviceOwned for UnsafeCommandBufferBuilder<P> where P: CommandPoo
unsafe impl<P> CommandBufferBuilder for UnsafeCommandBufferBuilder<P> where P: CommandPool {
#[inline]
fn queue_family(&self) -> QueueFamily {
self.pool.as_ref().unwrap().queue_family()
self.cmd.as_ref().unwrap().queue_family()
}
}
@ -249,20 +241,7 @@ unsafe impl<P> VulkanObject for UnsafeCommandBufferBuilder<P> where P: CommandPo
#[inline]
fn internal_object(&self) -> vk::CommandBuffer {
self.cmd
}
}
impl<P> Drop for UnsafeCommandBufferBuilder<P> where P: CommandPool {
#[inline]
fn drop(&mut self) {
//unsafe {
if self.cmd == 0 {
return;
}
// FIXME: vk.FreeCommandBuffers()
//}
self.cmd.as_ref().unwrap().inner().internal_object()
}
}
@ -275,16 +254,13 @@ unsafe impl<P> CommandBufferBuild for UnsafeCommandBufferBuilder<P>
#[inline]
fn build(mut self) -> Result<Self::Out, OomError> {
unsafe {
debug_assert_ne!(self.cmd, 0);
let cmd = self.cmd;
let cmd = self.cmd.take().unwrap();
let vk = self.device.pointers();
try!(check_errors(vk.EndCommandBuffer(cmd)));
self.cmd = 0; // Prevents the `Drop` impl of the builder from destroying the cb.
try!(check_errors(vk.EndCommandBuffer(cmd.inner().internal_object())));
Ok(UnsafeCommandBuffer {
cmd: cmd,
cmd: cmd.into_alloc(),
device: self.device.clone(),
pool: self.pool.take().unwrap().finish(),
flags: self.flags,
already_submitted: AtomicBool::new(false),
secondary_cb: self.secondary_cb
@ -296,17 +272,17 @@ unsafe impl<P> CommandBufferBuild for UnsafeCommandBufferBuilder<P>
/// Command buffer that has been built.
///
/// Doesn't perform any synchronization and doesn't keep the object it uses alive.
// TODO: change P parameter to be a CommandPoolAlloc
pub struct UnsafeCommandBuffer<P> where P: CommandPool {
// The Vulkan command buffer.
cmd: vk::CommandBuffer,
cmd: P::Alloc,
// Device that owns the command buffer.
// TODO: necessary?
device: Arc<Device>,
// Pool that owns the command buffer.
pool: P::Finished,
// Flags that were used at creation.
// TODO: necessary?
flags: Flags,
// True if the command buffer has already been submitted once. Only relevant if `flags` is
@ -358,17 +334,6 @@ unsafe impl<P> VulkanObject for UnsafeCommandBuffer<P> where P: CommandPool {
#[inline]
fn internal_object(&self) -> vk::CommandBuffer {
self.cmd
}
}
impl<P> Drop for UnsafeCommandBuffer<P> where P: CommandPool {
#[inline]
fn drop(&mut self) {
// release this command buffer for reuse
use command_buffer::pool::CommandPoolFinished;
unsafe {
self.pool.free(self.secondary_cb, Some(self.cmd.into()).into_iter());
}
self.cmd.inner().internal_object()
}
}
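The builder now stores its allocation as an `Option` so that `build()` can move it out with `take()`, and the manual `Drop` impls above disappear because cleanup is delegated to the allocation's own destructor. A minimal sketch of this pattern (hypothetical types, not the commit's code):

    // Stand-in for P::Builder; its destructor recycles the command buffer.
    struct PoolAlloc;

    impl Drop for PoolAlloc {
        fn drop(&mut self) {
            // A real implementation would return the buffer to its pool here.
        }
    }

    struct Builder {
        cmd: Option<PoolAlloc>, // None once build() has run
    }

    impl Builder {
        fn build(mut self) -> PoolAlloc {
            self.cmd.take().expect("already built")
        }
    }
    // Dropping an unbuilt Builder simply drops the PoolAlloc, which recycles
    // the command buffer; no manual vkFreeCommandBuffers path remains.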


@ -20,95 +20,84 @@ use instance::QueueFamily;
use device::DeviceOwned;
use OomError;
use VulkanObject;
use vk;
pub use self::standard::StandardCommandPool;
pub use self::standard::StandardCommandPoolFinished;
pub use self::sys::UnsafeCommandPool;
pub use self::sys::UnsafeCommandPoolAlloc;
pub use self::sys::UnsafeCommandPoolAllocIter;
pub use self::sys::CommandPoolTrimError;
mod standard;
pub mod standard;
mod sys;
/// Types that manage the memory of command buffers.
///
/// # Safety
///
/// A Vulkan command pool must be externally synchronized as if it owned the command buffers that
/// were allocated from it. This includes allocating from the pool, freeing from the pool,
/// resetting the pool or individual command buffers, and most importantly recording commands to
/// command buffers.
///
/// The implementation of `CommandPool` is expected to manage this. For as long as a `Builder`
/// is alive, the trait implementation is expected to lock the pool that allocated the `Builder`
/// for the current thread.
///
/// > **Note**: This may be modified in the future to allow different implementation strategies.
///
/// The destructors of the `CommandPoolBuilderAlloc` and the `CommandPoolAlloc` are expected to
/// free the command buffer, reset the command buffer, or add it to a pool so that it gets reused.
/// If the implementation frees or resets the command buffer, it must not forget to lock
/// the pool while doing so.
///
pub unsafe trait CommandPool: DeviceOwned {
/// See `alloc()`.
type Iter: Iterator<Item = AllocatedCommandBuffer>;
/// See `lock()`.
type Lock;
/// See `finish()`.
type Finished: CommandPoolFinished;
type Iter: Iterator<Item = Self::Builder>;
/// Represents a command buffer that has been allocated and that is currently being built.
type Builder: CommandPoolBuilderAlloc<Alloc = Self::Alloc>;
/// Represents a command buffer that has been allocated and that is pending execution or is
/// being executed.
type Alloc: CommandPoolAlloc;
/// Allocates command buffers from this pool.
///
/// Returns an iterator that yields the allocated command buffers.
fn alloc(&self, secondary: bool, count: u32) -> Result<Self::Iter, OomError>;
/// Frees command buffers from this pool.
///
/// # Safety
///
/// - The command buffers must have been allocated from this pool.
/// - `secondary` must have the same value as what was passed to `alloc`.
///
unsafe fn free<I>(&self, secondary: bool, command_buffers: I)
where I: Iterator<Item = AllocatedCommandBuffer>;
/// Once a command buffer has finished being built, it should call this method in order to
/// produce a `Finished` object.
///
/// The `Finished` object must hold the pool alive.
///
/// The point of this object is to change the Send/Sync strategy after a command buffer has
/// finished being built compared to before.
fn finish(self) -> Self::Finished;
/// Before any command buffer allocated from this pool can be modified, the pool itself must
/// be locked by calling this method.
///
/// All the operations are atomic at the thread level, so the point of this lock is to
/// prevent the pool from being accessed from multiple threads in parallel.
fn lock(&self) -> Self::Lock;
/// Returns true if command buffers can be reset individually. In other words, if the pool
/// was created with `reset_cb` set to true.
fn can_reset_invidual_command_buffers(&self) -> bool;
/// Returns the queue family that this pool targets.
fn queue_family(&self) -> QueueFamily;
}
/// See `CommandPool::finish()`.
pub unsafe trait CommandPoolFinished: DeviceOwned {
/// Frees command buffers.
///
/// # Safety
///
/// - The command buffers must have been allocated from this pool.
/// - `secondary` must have the same value as what was passed to `alloc`.
///
unsafe fn free<I>(&self, secondary: bool, command_buffers: I)
where I: Iterator<Item = AllocatedCommandBuffer>;
/// A command buffer allocated from a pool, to which commands can still be recorded.
///
/// # Safety
///
/// See `CommandPool` for information about safety.
///
pub unsafe trait CommandPoolBuilderAlloc: DeviceOwned {
/// Return type of `into_alloc`.
type Alloc: CommandPoolAlloc;
/// Returns the queue family that this pool targets.
/// Returns the internal object that contains the command buffer.
fn inner(&self) -> &UnsafeCommandPoolAlloc;
/// Turns this builder into a command buffer that is pending execution.
fn into_alloc(self) -> Self::Alloc;
/// Returns the queue family that the pool targets.
fn queue_family(&self) -> QueueFamily;
}
/// Opaque type that represents a command buffer allocated from a pool.
pub struct AllocatedCommandBuffer(vk::CommandBuffer);
/// A command buffer that was allocated from a pool and has finished being recorded.
///
/// # Safety
///
/// See `CommandPool` for information about safety.
///
pub unsafe trait CommandPoolAlloc: DeviceOwned {
/// Returns the internal object that contains the command buffer.
fn inner(&self) -> &UnsafeCommandPoolAlloc;
impl From<vk::CommandBuffer> for AllocatedCommandBuffer {
#[inline]
fn from(cmd: vk::CommandBuffer) -> AllocatedCommandBuffer {
AllocatedCommandBuffer(cmd)
}
}
unsafe impl VulkanObject for AllocatedCommandBuffer {
type Object = vk::CommandBuffer;
#[inline]
fn internal_object(&self) -> vk::CommandBuffer {
self.0
}
/// Returns the queue family that the pool targets.
fn queue_family(&self) -> QueueFamily;
}
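For reference, here is a hedged sketch of how the three associated types are meant to be used by code that is generic over any `CommandPool` implementation. The helper is hypothetical and not part of the commit; the imports assume vulkano's module layout as shown in this diff.

    use command_buffer::pool::{CommandPool, CommandPoolBuilderAlloc};
    use OomError;

    // Allocate one primary command buffer and immediately turn it into a
    // pending-execution allocation. Recording would happen in between,
    // through `builder.inner()`.
    fn alloc_one<P: CommandPool>(pool: &P) -> Result<P::Alloc, OomError> {
        let builder: P::Builder = try!(pool.alloc(false, 1)).next()
            .expect("pool returned no command buffer");
        Ok(builder.into_alloc())
    }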


@ -8,21 +8,19 @@
// according to those terms.
use std::cmp;
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
use std::iter::Chain;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::Mutex;
use std::vec::IntoIter as VecIntoIter;
use std::sync::Weak;
use fnv::FnvHasher;
use command_buffer::pool::AllocatedCommandBuffer;
use command_buffer::pool::CommandPool;
use command_buffer::pool::CommandPoolFinished;
use command_buffer::pool::CommandPoolAlloc;
use command_buffer::pool::CommandPoolBuilderAlloc;
use command_buffer::pool::UnsafeCommandPool;
use command_buffer::pool::UnsafeCommandPoolAllocIter;
use command_buffer::pool::UnsafeCommandPoolAlloc;
use instance::QueueFamily;
use device::Device;
@ -39,7 +37,8 @@ fn curr_thread_id() -> usize { THREAD_ID.with(|data| &**data as *const u8 as usi
/// Standard implementation of a command pool.
///
/// Will use one Vulkan pool per thread in order to avoid locking. Will try to reuse command
/// buffers. Locking is required only when allocating/freeing command buffers.
/// buffers. Command buffers can't be moved between threads during the building process, but
/// finished command buffers can.
pub struct StandardCommandPool {
// The device.
device: Arc<Device>,
@ -48,17 +47,20 @@ pub struct StandardCommandPool {
queue_family: u32,
// For each "thread id" (see `THREAD_ID` above), we store thread-specific info.
per_thread: Mutex<HashMap<usize, StandardCommandPoolPerThread, BuildHasherDefault<FnvHasher>>>,
per_thread: Mutex<HashMap<usize, Weak<Mutex<StandardCommandPoolPerThread>>,
BuildHasherDefault<FnvHasher>>>,
}
// Dummy marker in order to not implement `Send` and `Sync`.
//
// Since `StandardCommandPool` isn't Send/Sync, then the command buffers that use this pool
// won't be Send/Sync either, which means that we don't need to lock the pool while the CB
// is being built.
//
// However `StandardCommandPoolFinished` *is* Send/Sync because the only operation that can
// be called on `StandardCommandPoolFinished` is freeing, and freeing does actually lock.
dummy_avoid_send_sync: PhantomData<*const u8>,
unsafe impl Send for StandardCommandPool {}
unsafe impl Sync for StandardCommandPool {}
struct StandardCommandPoolPerThread {
// The Vulkan pool of this thread.
pool: UnsafeCommandPool,
// List of existing primary command buffers that are available for reuse.
available_primary_command_buffers: Vec<UnsafeCommandPoolAlloc>,
// List of existing secondary command buffers that are available for reuse.
available_secondary_command_buffers: Vec<UnsafeCommandPoolAlloc>,
}
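The per-thread map now stores `Weak` references, so a thread's pool state dies once no command buffer references it and is simply re-created on next use. The lookup-or-create dance can be sketched in isolation like this (standalone stand-in types, not the commit's code):

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex, Weak};

    struct PerThread; // stand-in for StandardCommandPoolPerThread

    fn get_or_create(map: &Mutex<HashMap<usize, Weak<Mutex<PerThread>>>>,
                     thread_id: usize) -> Arc<Mutex<PerThread>> {
        let mut map = map.lock().unwrap();
        // A dead Weak (all Arcs dropped) upgrades to None and is re-created.
        if let Some(existing) = map.get(&thread_id).and_then(|w| w.upgrade()) {
            return existing;
        }
        let fresh = Arc::new(Mutex::new(PerThread));
        map.insert(thread_id, Arc::downgrade(&fresh));
        fresh
    }

Because each builder and allocation holds an `Arc` to its per-thread state, the underlying Vulkan pool outlives every command buffer allocated from it, even after the originating thread exits.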
impl StandardCommandPool {
@ -68,103 +70,80 @@ impl StandardCommandPool {
///
/// - Panics if the device and the queue family don't belong to the same physical device.
///
pub fn new(device: &Arc<Device>, queue_family: QueueFamily) -> StandardCommandPool {
pub fn new(device: Arc<Device>, queue_family: QueueFamily) -> StandardCommandPool {
assert_eq!(device.physical_device().internal_object(),
queue_family.physical_device().internal_object());
StandardCommandPool {
device: device.clone(),
device: device,
queue_family: queue_family.id(),
per_thread: Mutex::new(Default::default()),
dummy_avoid_send_sync: PhantomData,
}
}
}
struct StandardCommandPoolPerThread {
// The Vulkan pool of this thread.
pool: UnsafeCommandPool,
// List of existing primary command buffers that are available for reuse.
available_primary_command_buffers: Vec<AllocatedCommandBuffer>,
// List of existing secondary command buffers that are available for reuse.
available_secondary_command_buffers: Vec<AllocatedCommandBuffer>,
}
unsafe impl CommandPool for Arc<StandardCommandPool> {
type Iter = Chain<VecIntoIter<AllocatedCommandBuffer>, UnsafeCommandPoolAllocIter>;
type Lock = ();
type Finished = StandardCommandPoolFinished;
type Iter = Box<Iterator<Item = StandardCommandPoolBuilder>>; // TODO: meh for Box
type Builder = StandardCommandPoolBuilder;
type Alloc = StandardCommandPoolAlloc;
fn alloc(&self, secondary: bool, count: u32) -> Result<Self::Iter, OomError> {
// Find the correct `StandardCommandPoolPerThread` structure.
let mut per_thread = self.per_thread.lock().unwrap();
let mut per_thread = match per_thread.entry(curr_thread_id()) {
Entry::Occupied(entry) => entry.into_mut(),
Entry::Vacant(entry) => {
let mut hashmap = self.per_thread.lock().unwrap();
//hashmap.retain(|_, w| w.upgrade().is_some()); // TODO: unstable // TODO: meh for iterating everything every time
// TODO: this hashmap lookup can probably be optimized
let curr_thread_id = curr_thread_id();
let per_thread = hashmap.get(&curr_thread_id).and_then(|p| p.upgrade());
let per_thread = match per_thread {
Some(pt) => pt,
None => {
let new_pool = try!(UnsafeCommandPool::new(self.device.clone(), self.queue_family(),
false, true));
entry.insert(StandardCommandPoolPerThread {
let pt = Arc::new(Mutex::new(StandardCommandPoolPerThread {
pool: new_pool,
available_primary_command_buffers: Vec::new(),
available_secondary_command_buffers: Vec::new(),
})
}));
hashmap.insert(curr_thread_id, Arc::downgrade(&pt));
pt
},
};
// Which list of already-existing command buffers we are going to pick CBs from.
let mut existing = if secondary { &mut per_thread.available_secondary_command_buffers }
else { &mut per_thread.available_primary_command_buffers };
let mut pt_lock = per_thread.lock().unwrap();
// Build an iterator to pick from already-existing command buffers.
let num_from_existing = cmp::min(count as usize, existing.len());
let from_existing = existing.drain(0 .. num_from_existing).collect::<Vec<_>>().into_iter();
let (num_from_existing, from_existing) = {
// Which list of already-existing command buffers we are going to pick CBs from.
let mut existing = if secondary { &mut pt_lock.available_secondary_command_buffers }
else { &mut pt_lock.available_primary_command_buffers };
let num_from_existing = cmp::min(count as usize, existing.len());
let from_existing = existing.drain(0 .. num_from_existing).collect::<Vec<_>>().into_iter();
(num_from_existing, from_existing)
};
// Build an iterator to construct the missing command buffers from the Vulkan pool.
let num_new = count as usize - num_from_existing;
debug_assert!(num_new <= count as usize); // Check overflows.
let newly_allocated = try!(per_thread.pool.alloc_command_buffers(secondary, num_new));
let newly_allocated = try!(pt_lock.pool.alloc_command_buffers(secondary, num_new));
// Returning them as a chain.
Ok(from_existing.chain(newly_allocated))
}
unsafe fn free<I>(&self, secondary: bool, command_buffers: I)
where I: Iterator<Item = AllocatedCommandBuffer>
{
// Do not actually free the command buffers. Instead, add them to the list of command
// buffers available for reuse.
let mut per_thread = self.per_thread.lock().unwrap();
let mut per_thread = per_thread.get_mut(&curr_thread_id()).unwrap();
if secondary {
for cb in command_buffers {
per_thread.available_secondary_command_buffers.push(cb);
let device = self.device.clone();
let queue_family_id = self.queue_family;
let per_thread = per_thread.clone();
let final_iter = from_existing.chain(newly_allocated).map(move |cmd| {
StandardCommandPoolBuilder {
cmd: Some(cmd),
pool: per_thread.clone(),
secondary: secondary,
device: device.clone(),
queue_family_id: queue_family_id,
dummy_avoid_send_sync: PhantomData,
}
} else {
for cb in command_buffers {
per_thread.available_primary_command_buffers.push(cb);
}
}
}
}).collect::<Vec<_>>();
#[inline]
fn finish(self) -> Self::Finished {
StandardCommandPoolFinished {
pool: self,
thread_id: curr_thread_id(),
}
}
#[inline]
fn lock(&self) -> Self::Lock {
()
}
#[inline]
fn can_reset_invidual_command_buffers(&self) -> bool {
true
Ok(Box::new(final_iter.into_iter()))
}
#[inline]
@ -180,42 +159,99 @@ unsafe impl DeviceOwned for StandardCommandPool {
}
}
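The reuse-then-allocate strategy in `alloc` reduces to the following standalone sketch (stand-in handle type; the real code chains fresh pool allocations instead of minting numbers):

    use std::cmp;

    // Take up to `count` buffers from the free list, then mint the rest.
    fn take_or_make(free_list: &mut Vec<u64>, count: usize) -> Vec<u64> {
        let num_reused = cmp::min(count, free_list.len());
        let mut out: Vec<u64> = free_list.drain(0..num_reused).collect();
        // Stand-in for `pool.alloc_command_buffers(secondary, num_new)`.
        out.extend((0..(count - num_reused)).map(|i| 0xFF00 + i as u64));
        out
    }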
pub struct StandardCommandPoolFinished {
pool: Arc<StandardCommandPool>,
thread_id: usize,
pub struct StandardCommandPoolBuilder {
cmd: Option<UnsafeCommandPoolAlloc>,
pool: Arc<Mutex<StandardCommandPoolPerThread>>,
secondary: bool,
device: Arc<Device>,
queue_family_id: u32,
dummy_avoid_send_sync: PhantomData<*const u8>,
}
unsafe impl CommandPoolFinished for StandardCommandPoolFinished {
unsafe fn free<I>(&self, secondary: bool, command_buffers: I)
where I: Iterator<Item = AllocatedCommandBuffer>
{
let mut per_thread = self.pool.per_thread.lock().unwrap();
let mut per_thread = per_thread.get_mut(&curr_thread_id()).unwrap();
unsafe impl CommandPoolBuilderAlloc for StandardCommandPoolBuilder {
type Alloc = StandardCommandPoolAlloc;
if secondary {
for cb in command_buffers {
per_thread.available_secondary_command_buffers.push(cb);
}
} else {
for cb in command_buffers {
per_thread.available_primary_command_buffers.push(cb);
}
#[inline]
fn inner(&self) -> &UnsafeCommandPoolAlloc {
self.cmd.as_ref().unwrap()
}
#[inline]
fn into_alloc(mut self) -> Self::Alloc {
StandardCommandPoolAlloc {
cmd: Some(self.cmd.take().unwrap()),
pool: self.pool.clone(),
secondary: self.secondary,
device: self.device.clone(),
queue_family_id: self.queue_family_id,
}
}
#[inline]
fn queue_family(&self) -> QueueFamily {
self.pool.queue_family()
self.device.physical_device().queue_family_by_id(self.queue_family_id).unwrap()
}
}
unsafe impl DeviceOwned for StandardCommandPoolFinished {
unsafe impl DeviceOwned for StandardCommandPoolBuilder {
#[inline]
fn device(&self) -> &Arc<Device> {
self.pool.device()
&self.device
}
}
// See `StandardCommandPool` for comments about this.
unsafe impl Send for StandardCommandPoolFinished {}
unsafe impl Sync for StandardCommandPoolFinished {}
impl Drop for StandardCommandPoolBuilder {
fn drop(&mut self) {
if let Some(cmd) = self.cmd.take() {
let mut pool = self.pool.lock().unwrap();
if self.secondary {
pool.available_secondary_command_buffers.push(cmd);
} else {
pool.available_primary_command_buffers.push(cmd);
}
}
}
}
pub struct StandardCommandPoolAlloc {
cmd: Option<UnsafeCommandPoolAlloc>,
pool: Arc<Mutex<StandardCommandPoolPerThread>>,
secondary: bool,
device: Arc<Device>,
queue_family_id: u32,
}
unsafe impl Send for StandardCommandPoolAlloc {}
unsafe impl Sync for StandardCommandPoolAlloc {}
unsafe impl CommandPoolAlloc for StandardCommandPoolAlloc {
#[inline]
fn inner(&self) -> &UnsafeCommandPoolAlloc {
self.cmd.as_ref().unwrap()
}
#[inline]
fn queue_family(&self) -> QueueFamily {
self.device.physical_device().queue_family_by_id(self.queue_family_id).unwrap()
}
}
unsafe impl DeviceOwned for StandardCommandPoolAlloc {
#[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
}
impl Drop for StandardCommandPoolAlloc {
fn drop(&mut self) {
let mut pool = self.pool.lock().unwrap();
if self.secondary {
pool.available_secondary_command_buffers.push(self.cmd.take().unwrap());
} else {
pool.available_primary_command_buffers.push(self.cmd.take().unwrap());
}
}
}


@ -16,7 +16,6 @@ use std::sync::Arc;
use std::vec::IntoIter as VecIntoIter;
use smallvec::SmallVec;
use command_buffer::pool::AllocatedCommandBuffer;
use instance::QueueFamily;
use device::Device;
@ -184,7 +183,7 @@ impl UnsafeCommandPool {
/// The command buffers must have been allocated from this pool. They must not be in use.
///
pub unsafe fn free_command_buffers<I>(&self, command_buffers: I)
where I: Iterator<Item = AllocatedCommandBuffer>
where I: Iterator<Item = UnsafeCommandPoolAlloc>
{
let command_buffers: SmallVec<[_; 4]> = command_buffers.map(|cb| cb.0).collect();
let vk = self.device.pointers();
@ -225,6 +224,18 @@ impl Drop for UnsafeCommandPool {
}
}
/// Opaque type that represents a command buffer allocated from a pool.
pub struct UnsafeCommandPoolAlloc(vk::CommandBuffer);
unsafe impl VulkanObject for UnsafeCommandPoolAlloc {
type Object = vk::CommandBuffer;
#[inline]
fn internal_object(&self) -> vk::CommandBuffer {
self.0
}
}
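With `AllocatedCommandBuffer` gone, an unsafe-level alloc/free round trip now looks roughly like this. The helper is hypothetical; the signatures are taken from this diff.

    use command_buffer::pool::UnsafeCommandPool;
    use OomError;

    // Allocate four primary command buffers, then hand them straight back.
    // Safety: the buffers come from `pool` and are not in use anywhere.
    unsafe fn alloc_then_free(pool: &UnsafeCommandPool) -> Result<(), OomError> {
        let cbs = try!(pool.alloc_command_buffers(false, 4));
        pool.free_command_buffers(cbs);
        Ok(())
    }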
/// Iterator for newly-allocated command buffers.
#[derive(Debug)]
pub struct UnsafeCommandPoolAllocIter {
@ -232,11 +243,11 @@ pub struct UnsafeCommandPoolAllocIter {
}
impl Iterator for UnsafeCommandPoolAllocIter {
type Item = AllocatedCommandBuffer;
type Item = UnsafeCommandPoolAlloc;
#[inline]
fn next(&mut self) -> Option<AllocatedCommandBuffer> {
self.list.as_mut().and_then(|i| i.next()).map(|cb| AllocatedCommandBuffer(cb))
fn next(&mut self) -> Option<UnsafeCommandPoolAlloc> {
self.list.as_mut().and_then(|i| i.next()).map(|cb| UnsafeCommandPoolAlloc(cb))
}
#[inline]


@ -392,12 +392,12 @@ impl Device {
return pool;
}
let new_pool = Arc::new(StandardCommandPool::new(me, queue));
let new_pool = Arc::new(StandardCommandPool::new(me.clone(), queue));
*entry.get_mut() = Arc::downgrade(&new_pool);
new_pool
},
Entry::Vacant(entry) => {
let new_pool = Arc::new(StandardCommandPool::new(me, queue));
let new_pool = Arc::new(StandardCommandPool::new(me.clone(), queue));
entry.insert(Arc::downgrade(&new_pool));
new_pool
}
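A hedged usage sketch of the updated constructor (hypothetical helper, not part of the commit): `StandardCommandPool::new` now takes the `Arc<Device>` by value, so the caller clones the `Arc` explicitly, as `Device::standard_command_pool` does above with `me.clone()`.

    use std::sync::Arc;
    use command_buffer::pool::StandardCommandPool;
    use device::Device;
    use instance::QueueFamily;

    fn make_pool(device: &Arc<Device>, queue_family: QueueFamily)
                 -> Arc<StandardCommandPool> {
        // Cloning an Arc only bumps a reference count; the device itself
        // is not duplicated.
        Arc::new(StandardCommandPool::new(device.clone(), queue_family))
    }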