Mirror of https://github.com/vulkano-rs/vulkano.git
Fix thread safety issues
parent 278a7a3e3f
commit ce20e41629
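The core of the change is a move from externally locked queues (`Arc<Mutex<Queue>>`, where every caller locks the whole object) to internally synchronized ones (`Arc<Queue>` where only the raw `vk::Queue` handle sits behind a `Mutex`). The same treatment is applied to command buffer pools, descriptor pools, and events. A minimal sketch of the pattern follows; the names `RawHandle`, `QueueBefore`, and `QueueAfter` are illustrative stand-ins, not vulkano's actual types:

use std::sync::{Arc, Mutex, MutexGuard};

// Stand-in for a raw Vulkan handle such as vk::Queue; illustrative only.
type RawHandle = u64;

// Before this commit: the whole wrapper lived behind an external Mutex,
// so every caller had to hold Arc<Mutex<Queue>> and lock it explicitly.
#[allow(dead_code)]
pub struct QueueBefore {
    raw: RawHandle,
}

// After: the wrapper is shared as Arc<Queue> and only the raw handle is
// protected; locking becomes an implementation detail.
pub struct QueueAfter {
    raw: Mutex<RawHandle>,
}

impl QueueAfter {
    // Methods take &self instead of &mut self; exclusive access to the
    // raw handle is obtained by locking internally.
    pub fn submit(&self) {
        let raw: MutexGuard<RawHandle> = self.raw.lock().unwrap();
        let _ = *raw; // a real implementation would call the driver here
    }
}

fn main() {
    // Callers clone the Arc freely across threads; no outer Mutex needed.
    let queue = Arc::new(QueueAfter { raw: Mutex::new(0) });
    let q2 = queue.clone();
    std::thread::spawn(move || q2.submit()).join().unwrap();
    queue.submit();
}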
@@ -52,7 +52,7 @@ fn main() {
     };
 
 
-    let cb_pool = vulkano::command_buffer::CommandBufferPool::new(&device, &queue.lock().unwrap().family())
+    let cb_pool = vulkano::command_buffer::CommandBufferPool::new(&device, &queue.family())
         .expect("failed to create command buffer pool");
 
 
@@ -103,7 +103,7 @@ fn main() {
     let texture = vulkano::image::Image::<vulkano::image::Type2d, _, _>::new(&device, &vulkano::image::Usage::all(),
                                                                              vulkano::memory::DeviceLocal, &queue,
                                                                              vulkano::formats::R8G8B8A8Unorm, [93, 93], (), 1).unwrap();
-    let texture = texture.transition(vulkano::image::Layout::ShaderReadOnlyOptimal, &cb_pool, &mut queue.lock().unwrap()).unwrap();
+    let texture = texture.transition(vulkano::image::Layout::ShaderReadOnlyOptimal, &cb_pool, &queue).unwrap();
     let texture_view = vulkano::image::ImageView::new(&texture).expect("failed to create image view");
 
 
@@ -160,8 +160,7 @@ fn main() {
 
 
     let images = images.into_iter().map(|image| {
-        let image = image.transition(vulkano::image::Layout::PresentSrc, &cb_pool,
-                                     &mut queue.lock().unwrap()).unwrap();
+        let image = image.transition(vulkano::image::Layout::PresentSrc, &cb_pool, &queue).unwrap();
         vulkano::image::ImageView::new(&image).expect("failed to create image view")
     }).collect::<Vec<_>>();
 
@@ -215,10 +214,8 @@ fn main() {
 
     loop {
         let image_num = swapchain.acquire_next_image(1000000).unwrap();
-        let mut queue = queue.lock().unwrap();
-        command_buffers[image_num].submit(&mut queue).unwrap();
-        swapchain.present(&mut queue, image_num).unwrap();
-        drop(queue);
+        command_buffers[image_num].submit(&queue).unwrap();
+        swapchain.present(&queue, image_num).unwrap();
 
         for ev in window.poll_events() {
             match ev {
@@ -54,14 +54,14 @@ fn main() {
     };
 
 
-    let cb_pool = vulkano::command_buffer::CommandBufferPool::new(&device, &queue.lock().unwrap().family())
+    let cb_pool = vulkano::command_buffer::CommandBufferPool::new(&device, &queue.family())
         .expect("failed to create command buffer pool");
 
 
     let depth_buffer = vulkano::image::Image::<vulkano::image::Type2d, _, _>::new(&device, &vulkano::image::Usage::all(),
                                                                                   vulkano::memory::DeviceLocal, &queue,
                                                                                   vulkano::formats::D16Unorm, images[0].dimensions(), (), 1).unwrap();
-    let depth_buffer = depth_buffer.transition(vulkano::image::Layout::DepthStencilAttachmentOptimal, &cb_pool, &mut queue.lock().unwrap()).unwrap();
+    let depth_buffer = depth_buffer.transition(vulkano::image::Layout::DepthStencilAttachmentOptimal, &cb_pool, &queue).unwrap();
     let depth_buffer = vulkano::image::ImageView::new(&depth_buffer).expect("failed to create image view");
 
     let vertex_buffer = vulkano::buffer::Buffer::<[teapot::Vertex], _>
@@ -126,7 +126,7 @@ fn main() {
 
     let images = images.into_iter().map(|image| {
         let image = image.transition(vulkano::image::Layout::PresentSrc, &cb_pool,
-                                     &mut queue.lock().unwrap()).unwrap();
+                                     &queue).unwrap();
         vulkano::image::ImageView::new(&image).expect("failed to create image view")
     }).collect::<Vec<_>>();
 
@@ -219,10 +219,8 @@ fn main() {
 
     loop {
         let image_num = swapchain.acquire_next_image(1000000).unwrap();
-        let mut queue = queue.lock().unwrap();
-        command_buffers[image_num].submit(&mut queue).unwrap();
-        swapchain.present(&mut queue, image_num).unwrap();
-        drop(queue);
+        command_buffers[image_num].submit(&queue).unwrap();
+        swapchain.present(&queue, image_num).unwrap();
 
         for ev in window.poll_events() {
             match ev {
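Note the subtle semantic shift in the render loops above: the old code locked once and held the guard across both `submit` and `present`, making the pair atomic; with internal synchronization each call takes the lock separately. A sketch of the two call shapes, using illustrative stand-ins rather than the vulkano API:

use std::sync::{Arc, Mutex};

// Illustrative stand-ins, not the vulkano API.
struct Queue { raw: Mutex<u64> }

impl Queue {
    fn submit(&self) { let _guard = self.raw.lock().unwrap(); }
    fn present(&self) { let _guard = self.raw.lock().unwrap(); }
}

fn main() {
    let queue = Arc::new(Queue { raw: Mutex::new(0) });

    // Old shape: one guard held across both calls made the pair atomic.
    // New shape: each call locks on its own, so another thread may slip
    // a submission in between submit() and present().
    queue.submit();
    queue.present();
}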
@@ -63,7 +63,7 @@ pub unsafe trait AbstractBuffer: Resource + ::VulkanObjectU64 {
     ///
     /// The function can return a semaphore which will be waited up by the GPU before the
     /// work starts.
-    unsafe fn gpu_access(&self, write: bool, offset: usize, size: usize, queue: &mut Queue,
+    unsafe fn gpu_access(&self, write: bool, offset: usize, size: usize, queue: &Arc<Queue>,
                          fence: Option<Arc<Fence>>, semaphore: Option<Arc<Semaphore>>)
                          -> Option<Arc<Semaphore>>;
 
@@ -246,7 +246,7 @@ unsafe impl<T: ?Sized, M> AbstractBuffer for Buffer<T, M> where M: MemorySourceC
     }
 
     #[inline]
-    unsafe fn gpu_access(&self, write: bool, offset: usize, size: usize, queue: &mut Queue,
+    unsafe fn gpu_access(&self, write: bool, offset: usize, size: usize, queue: &Arc<Queue>,
                          fence: Option<Arc<Fence>>, semaphore: Option<Arc<Semaphore>>)
                          -> Option<Arc<Semaphore>>
     {
@@ -31,6 +31,7 @@ use sync::Semaphore;
 
 use device::Device;
 use OomError;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -75,10 +76,12 @@ impl InnerCommandBufferBuilder {
         let vk = device.pointers();
 
         let cmd = unsafe {
+            let pool = pool.internal_object_guard();
+
             let infos = vk::CommandBufferAllocateInfo {
                 sType: vk::STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
                 pNext: ptr::null(),
-                commandPool: pool.internal_object(),
+                commandPool: *pool,
                 level: if secondary {
                     vk::COMMAND_BUFFER_LEVEL_SECONDARY
                 } else {
@@ -608,8 +611,9 @@ impl Drop for InnerCommandBufferBuilder {
             unsafe {
                 let vk = self.device.pointers();
                 vk.EndCommandBuffer(cmd);
-                vk.FreeCommandBuffers(self.device.internal_object(), self.pool.internal_object(),
-                                      1, &cmd);
+
+                let pool = self.pool.internal_object_guard();
+                vk.FreeCommandBuffers(self.device.internal_object(), *pool, 1, &cmd);
             }
         }
     }
@@ -635,7 +639,7 @@ impl InnerCommandBuffer {
     /// - Panicks if the queue doesn't belong to the device this command buffer was created with.
     /// - Panicks if the queue doesn't belong to the family the pool was created with.
     ///
-    pub fn submit(&self, queue: &mut Queue) -> Result<(), OomError> {   // TODO: wrong error type
+    pub fn submit(&self, queue: &Arc<Queue>) -> Result<(), OomError> {   // TODO: wrong error type
         // FIXME: the whole function should be checked
         let vk = self.device.pointers();
 
@@ -723,7 +727,7 @@ impl InnerCommandBuffer {
 
         unsafe {
            let fence = if let Some(ref fence) = fence { fence.internal_object() } else { 0 };
-            try!(check_errors(vk.QueueSubmit(queue.internal_object(), 1, &infos, fence)));
+            try!(check_errors(vk.QueueSubmit(*queue.internal_object_guard(), 1, &infos, fence)));
         }
 
         // FIXME: the return value shouldn't be () because the command buffer
@@ -743,8 +747,8 @@ impl Drop for InnerCommandBuffer {
     fn drop(&mut self) {
         unsafe {
             let vk = self.device.pointers();
-            vk.FreeCommandBuffers(self.device.internal_object(), self.pool.internal_object(),
-                                  1, &self.cmd);
+            let pool = self.pool.internal_object_guard();
+            vk.FreeCommandBuffers(self.device.internal_object(), *pool, 1, &self.cmd);
         }
     }
 }
@@ -437,7 +437,7 @@ impl PrimaryCommandBuffer {
     /// - Panicks if the queue doesn't belong to the family the pool was created with.
     ///
     #[inline]
-    pub fn submit(&self, queue: &mut Queue) -> Result<(), OomError> {   // TODO: wrong error type
+    pub fn submit(&self, queue: &Arc<Queue>) -> Result<(), OomError> {   // TODO: wrong error type
        self.inner.submit(queue)
    }
}
@@ -1,11 +1,14 @@
 use std::mem;
 use std::ptr;
 use std::sync::Arc;
+use std::sync::Mutex;
+use std::sync::MutexGuard;
 
 use instance::QueueFamily;
 
 use device::Device;
 use OomError;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -13,8 +16,8 @@ use vk;
 
 /// A pool from which command buffers are created from.
 pub struct CommandBufferPool {
+    pool: Mutex<vk::CommandPool>,
     device: Arc<Device>,
-    pool: vk::CommandPool,
     queue_family_index: u32,
 }
 
@@ -52,8 +55,8 @@ impl CommandBufferPool {
         };
 
         Ok(Arc::new(CommandBufferPool {
+            pool: Mutex::new(pool),
             device: device.clone(),
-            pool: pool,
             queue_family_index: queue_family.id(),
         }))
     }
@@ -71,12 +74,12 @@ impl CommandBufferPool {
     }
 }
 
-unsafe impl VulkanObject for CommandBufferPool {
+unsafe impl SynchronizedVulkanObject for CommandBufferPool {
     type Object = vk::CommandPool;
 
     #[inline]
-    fn internal_object(&self) -> vk::CommandPool {
-        self.pool
+    fn internal_object_guard(&self) -> MutexGuard<vk::CommandPool> {
+        self.pool.lock().unwrap()
     }
 }
 
@@ -85,7 +88,8 @@ impl Drop for CommandBufferPool {
     fn drop(&mut self) {
         unsafe {
             let vk = self.device.pointers();
-            vk.DestroyCommandPool(self.device.internal_object(), self.pool, ptr::null());
+            let pool = self.pool.lock().unwrap();
+            vk.DestroyCommandPool(self.device.internal_object(), *pool, ptr::null());
         }
     }
 }
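The pool follows the same recipe as the queue: only the raw handle goes behind the `Mutex`, the accessor hands out a `MutexGuard`, and `Drop` locks before destroying. A self-contained sketch of that shape, with `RawPool` and `destroy_pool` as hypothetical stand-ins for the Vulkan FFI:

use std::ptr;
use std::sync::{Mutex, MutexGuard};

// Illustrative raw handle and destroy function, not the real Vulkan FFI.
type RawPool = u64;
unsafe fn destroy_pool(_pool: RawPool, _allocator: *const ()) {}

pub struct Pool {
    pool: Mutex<RawPool>,
}

impl Pool {
    // Mirrors the internal_object_guard accessor: the guard keeps the
    // pool locked for as long as the caller uses the raw handle.
    pub fn internal_object_guard(&self) -> MutexGuard<RawPool> {
        self.pool.lock().unwrap()
    }
}

impl Drop for Pool {
    fn drop(&mut self) {
        // Locking in Drop matches the diff above; since Drop has
        // &mut self, Mutex::get_mut would be an uncontended alternative.
        let pool = self.pool.lock().unwrap();
        unsafe { destroy_pool(*pool, ptr::null()) }
    }
}

fn main() {
    let pool = Pool { pool: Mutex::new(7) };
    println!("raw handle: {}", *pool.internal_object_guard());
}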
@@ -1,10 +1,13 @@
 use std::mem;
 use std::ptr;
 use std::sync::Arc;
+use std::sync::Mutex;
+use std::sync::MutexGuard;
 
 use device::Device;
 
 use OomError;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -15,7 +18,7 @@ use vk;
 /// A pool has a maximum number of descriptor sets and a maximum number of descriptors (one value
 /// per descriptor type) it can allocate.
 pub struct DescriptorPool {
-    pool: vk::DescriptorPool,
+    pool: Mutex<vk::DescriptorPool>,
     device: Arc<Device>,
 }
 
@@ -50,7 +53,7 @@ impl DescriptorPool {
         };
 
         Ok(Arc::new(DescriptorPool {
-            pool: pool,
+            pool: Mutex::new(pool),
             device: device.clone(),
         }))
     }
@@ -62,12 +65,12 @@ impl DescriptorPool {
     }
 }
 
-unsafe impl VulkanObject for DescriptorPool {
+unsafe impl SynchronizedVulkanObject for DescriptorPool {
     type Object = vk::DescriptorPool;
 
     #[inline]
-    fn internal_object(&self) -> vk::DescriptorPool {
-        self.pool
+    fn internal_object_guard(&self) -> MutexGuard<vk::DescriptorPool> {
+        self.pool.lock().unwrap()
     }
 }
 
@@ -76,7 +79,8 @@ impl Drop for DescriptorPool {
     fn drop(&mut self) {
         unsafe {
             let vk = self.device.pointers();
-            vk.DestroyDescriptorPool(self.device.internal_object(), self.pool, ptr::null());
+            let pool = self.pool.lock().unwrap();
+            vk.DestroyDescriptorPool(self.device.internal_object(), *pool, ptr::null());
        }
    }
}
@@ -13,6 +13,7 @@ use image::AbstractImageView;
 use sampler::Sampler;
 
 use OomError;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -61,10 +62,12 @@ impl<S> DescriptorSet<S> where S: DescriptorSetDesc {
         let vk = pool.device().pointers();
 
         let set = {
+            let pool_obj = pool.internal_object_guard();
+
             let infos = vk::DescriptorSetAllocateInfo {
                 sType: vk::STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                 pNext: ptr::null(),
-                descriptorPool: pool.internal_object(),
+                descriptorPool: *pool_obj,
                 descriptorSetCount: 1,
                 pSetLayouts: &layout.layout,
             };
@@ -252,7 +255,7 @@ impl<S> Drop for DescriptorSet<S> {
         unsafe {
             let vk = self.pool.device().pointers();
             vk.FreeDescriptorSets(self.pool.device().internal_object(),
-                                  self.pool.internal_object(), 1, &self.set);
+                                  *self.pool.internal_object_guard(), 1, &self.set);
         }
     }
 }
@@ -10,6 +10,7 @@ use std::mem;
 use std::ptr;
 use std::sync::Arc;
 use std::sync::Mutex;
+use std::sync::MutexGuard;
 
 use instance::Features;
 use instance::Instance;
@@ -18,6 +19,7 @@ use instance::QueueFamily;
 
 use Error;
 use OomError;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -57,7 +59,7 @@ impl Device {
     // TODO: return Arc<Queue> and handle synchronization in the Queue
     pub fn new<'a, I, L>(phys: &'a PhysicalDevice, requested_features: &Features, queue_families: I,
                          layers: L)
-                         -> Result<(Arc<Device>, Vec<Arc<Mutex<Queue>>>), DeviceCreationError>
+                         -> Result<(Arc<Device>, Vec<Arc<Queue>>), DeviceCreationError>
         where I: IntoIterator<Item = (QueueFamily<'a>, f32)>,
               L: IntoIterator<Item = &'a &'a str>
     {
@@ -163,12 +165,12 @@ impl Device {
             unsafe {
                 let mut output = mem::uninitialized();
                 device.vk.GetDeviceQueue(device.device, family, id, &mut output);
-                Arc::new(Mutex::new(Queue {
+                Arc::new(Queue {
+                    queue: Mutex::new(output),
                     device: device.clone(),
-                    queue: output,
                     family: family,
                     id: id,
-                }))
+                })
             }
         }).collect();
 
@@ -276,8 +278,8 @@ impl From<Error> for DeviceCreationError {
 /// Represents a queue where commands can be submitted.
 // TODO: should use internal synchronization
 pub struct Queue {
+    queue: Mutex<vk::Queue>,
     device: Arc<Device>,
-    queue: vk::Queue,
     family: u32,
     id: u32,    // id within family
 }
@@ -299,20 +301,21 @@ impl Queue {
     ///
     /// Just like `Device::wait()`, you shouldn't have to call this function.
     #[inline]
-    pub fn wait(&mut self) -> Result<(), OomError> {
+    pub fn wait(&self) -> Result<(), OomError> {
         unsafe {
             let vk = self.device.pointers();
-            try!(check_errors(vk.QueueWaitIdle(self.queue)));
+            let queue = self.queue.lock().unwrap();
+            try!(check_errors(vk.QueueWaitIdle(*queue)));
             Ok(())
         }
     }
 }
 
-unsafe impl VulkanObject for Queue {
+unsafe impl SynchronizedVulkanObject for Queue {
     type Object = vk::Queue;
 
     #[inline]
-    fn internal_object(&self) -> vk::Queue {
-        self.queue
+    fn internal_object_guard(&self) -> MutexGuard<vk::Queue> {
+        self.queue.lock().unwrap()
     }
 }
 
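The `wait(&mut self)` to `wait(&self)` change above is forced by the new ownership model: once queues are handed out as `Arc<Queue>`, no caller can obtain `&mut Queue` through the `Arc`, so every method must work through `&self` and regain exclusive access by locking the internal `Mutex`. A minimal sketch of why this works, with an illustrative `Queue` rather than vulkano's:

use std::sync::{Arc, Mutex};

// Illustrative, not vulkano's Queue.
struct Queue { raw: Mutex<u64> }

impl Queue {
    // With &mut self this method could not be called through an
    // Arc<Queue>; &self plus an internal lock restores exclusive
    // access to the raw handle for the duration of the call.
    fn wait(&self) {
        let raw = self.raw.lock().unwrap();
        let _ = *raw; // e.g. the equivalent of vkQueueWaitIdle(*raw)
    }
}

fn main() {
    let queue = Arc::new(Queue { raw: Mutex::new(0) });
    queue.wait(); // works: only &self is required
}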
@@ -33,6 +33,7 @@ use sync::Semaphore;
 use sync::SharingMode;
 
 use OomError;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -64,14 +65,14 @@ pub unsafe trait AbstractImage: Resource + ::VulkanObjectU64 {
     ///
     /// The function can return a semaphore which will be waited up by the GPU before the
     /// work starts.
-    unsafe fn gpu_access(&self, write: bool, queue: &mut Queue, fence: Option<Arc<Fence>>,
+    unsafe fn gpu_access(&self, write: bool, queue: &Arc<Queue>, fence: Option<Arc<Fence>>,
                          semaphore: Option<Arc<Semaphore>>) -> Option<Arc<Semaphore>>;
 }
 
 pub unsafe trait AbstractImageView: Resource + ::VulkanObjectU64 {
     fn default_layout(&self) -> Layout;
 
-    unsafe fn gpu_access(&self, write: bool, queue: &mut Queue, fence: Option<Arc<Fence>>,
+    unsafe fn gpu_access(&self, write: bool, queue: &Arc<Queue>, fence: Option<Arc<Fence>>,
                          semaphore: Option<Arc<Semaphore>>) -> Option<Arc<Semaphore>>;
 
     /// True if the image can be used as a source for transfers.
@@ -441,7 +442,7 @@ unsafe impl<Ty, F, M> AbstractImage for Image<Ty, F, M>
     }
 
     #[inline]
-    unsafe fn gpu_access(&self, write: bool, queue: &mut Queue, fence: Option<Arc<Fence>>,
+    unsafe fn gpu_access(&self, write: bool, queue: &Arc<Queue>, fence: Option<Arc<Fence>>,
                          semaphore: Option<Arc<Semaphore>>) -> Option<Arc<Semaphore>>
     {
         // FIXME: if the image is in its initial transition phase, we need to a semaphore
@@ -507,7 +508,7 @@ impl<Ty, F, M> ImagePrototype<Ty, F, M>
     ///
     /// - Panicks if `layout` is `Undefined` or `Preinitialized`.
     // FIXME: PresentSrc is only allowed for swapchain images
-    pub fn transition(self, layout: Layout, pool: &CommandBufferPool, submit_queue: &mut Queue)
+    pub fn transition(self, layout: Layout, pool: &CommandBufferPool, submit_queue: &Arc<Queue>)
                       -> Result<Arc<Image<Ty, F, M>>, OomError>     // FIXME: error type
     {
         // FIXME: check pool and submit queue correspondance
@@ -524,10 +525,12 @@ impl<Ty, F, M> ImagePrototype<Ty, F, M>
 
         unsafe {
             let cmd = {
+                let pool = pool.internal_object_guard();
+
                 let infos = vk::CommandBufferAllocateInfo {
                     sType: vk::STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
                     pNext: ptr::null(),
-                    commandPool: pool.internal_object(),
+                    commandPool: *pool,
                     level: vk::COMMAND_BUFFER_LEVEL_SECONDARY,
                     commandBufferCount: 1,
                 };
@@ -588,7 +591,7 @@ impl<Ty, F, M> ImagePrototype<Ty, F, M>
                 pSignalSemaphores: ptr::null(),     // TODO:
             };
 
-            try!(check_errors(vk.QueueSubmit(submit_queue.internal_object(), 1, &infos, 0)));
+            try!(check_errors(vk.QueueSubmit(*submit_queue.internal_object_guard(), 1, &infos, 0)));
         }
     }
 
@@ -769,7 +772,7 @@ unsafe impl<Ty, F, M> AbstractImageView for ImageView<Ty, F, M>
     }
 
     #[inline]
-    unsafe fn gpu_access(&self, write: bool, queue: &mut Queue, fence: Option<Arc<Fence>>,
+    unsafe fn gpu_access(&self, write: bool, queue: &Arc<Queue>, fence: Option<Arc<Fence>>,
                          semaphore: Option<Arc<Semaphore>>) -> Option<Arc<Semaphore>>
     {
         self.image.gpu_access(write, queue, fence, semaphore)
@@ -67,6 +67,7 @@ use std::error;
 use std::fmt;
 use std::mem;
 use std::path::Path;
+use std::sync::MutexGuard;
 
 mod vk {
     #![allow(dead_code)]
@@ -106,6 +107,15 @@ pub unsafe trait VulkanObject {
     fn internal_object(&self) -> Self::Object;
 }
 
+/// Gives access to the internal identifier of an object.
+pub unsafe trait SynchronizedVulkanObject {
+    /// The type of the object.
+    type Object;
+
+    /// Returns a reference to the object.
+    fn internal_object_guard(&self) -> MutexGuard<Self::Object>;
+}
+
 // TODO: remove eventually
 // https://github.com/rust-lang/rust/issues/29328
 pub unsafe trait VulkanObjectU64 { fn internal_object(&self) -> u64; }
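`SynchronizedVulkanObject` is the trait-level expression of the pattern: instead of returning a copy of the raw handle, the accessor returns a `MutexGuard`, so the handle stays locked for as long as the caller holds it (the guard's elided lifetime borrows `self`). A simplified, compilable sketch of the trait split; the real traits are `unsafe` and live in vulkano's crate root, and `u64` here stands in for a raw Vulkan handle:

use std::sync::{Mutex, MutexGuard};

#[allow(dead_code)]
trait VulkanObject {
    type Object;
    fn internal_object(&self) -> Self::Object;
}

trait SynchronizedVulkanObject {
    type Object;
    // Returning a guard rather than a copy means the object stays
    // locked for as long as the caller holds on to the raw handle.
    fn internal_object_guard(&self) -> MutexGuard<Self::Object>;
}

struct Event { event: Mutex<u64> } // stand-in for Mutex<vk::Event>

impl SynchronizedVulkanObject for Event {
    type Object = u64;
    fn internal_object_guard(&self) -> MutexGuard<u64> {
        self.event.lock().unwrap()
    }
}

fn main() {
    let ev = Event { event: Mutex::new(3) };
    let guard = ev.internal_object_guard();
    println!("handle {} is locked while the guard lives", *guard);
}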
@@ -159,7 +159,7 @@ pub unsafe trait MemorySourceChunk {
     /// return a semaphore that must be waited upon by the GPU before the access can start. The
     /// semaphore being returned is usually one that has been previously passed to this function,
     /// but it doesn't need to be the case.
-    unsafe fn gpu_access(&self, write: bool, range: ChunkRange, queue: &mut Queue,
+    unsafe fn gpu_access(&self, write: bool, range: ChunkRange, queue: &Arc<Queue>,
                          fence: Option<Arc<Fence>>, semaphore: Option<Arc<Semaphore>>)
                          -> Option<Arc<Semaphore>>;
 
@@ -72,7 +72,7 @@ pub struct DeviceLocalChunk {
 
 unsafe impl MemorySourceChunk for DeviceLocalChunk {
     #[inline]
-    unsafe fn gpu_access(&self, _write: bool, _range: ChunkRange, _: &mut Queue,
+    unsafe fn gpu_access(&self, _write: bool, _range: ChunkRange, _: &Arc<Queue>,
                          _: Option<Arc<Fence>>, mut semaphore: Option<Arc<Semaphore>>)
                          -> Option<Arc<Semaphore>>
     {
@@ -153,7 +153,7 @@ pub struct HostVisibleChunk {
 
 unsafe impl MemorySourceChunk for HostVisibleChunk {
     #[inline]
-    unsafe fn gpu_access(&self, _write: bool, _range: ChunkRange, _: &mut Queue,
+    unsafe fn gpu_access(&self, _write: bool, _range: ChunkRange, _: &Arc<Queue>,
                          fence: Option<Arc<Fence>>, mut semaphore: Option<Arc<Semaphore>>)
                          -> Option<Arc<Semaphore>>
     {
@@ -76,6 +76,7 @@ impl PipelineCache {
     ///
     /// - Panicks if `self` is included in the list of other pipelines.
     ///
+    // FIXME: vkMergePipelineCaches is not thread safe for the destination cache
     pub fn merge<'a, I>(&self, pipelines: I) -> Result<(), OomError>
         where I: IntoIterator<Item = &'a &'a Arc<PipelineCache>>
     {
@@ -27,6 +27,7 @@ use check_errors;
 use Error;
 use OomError;
 use Success;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use vk;
@@ -202,7 +203,7 @@ impl Swapchain {
     ///
     /// The actual behavior depends on the present mode that you passed when creating the
     /// swapchain.
-    pub fn present(&self, queue: &mut Queue, index: usize) -> Result<(), OomError> {     // FIXME: wrong error
+    pub fn present(&self, queue: &Arc<Queue>, index: usize) -> Result<(), OomError> {     // FIXME: wrong error
         let vk = self.device.pointers();
 
         let wait_semaphore = {
@@ -227,7 +228,7 @@ impl Swapchain {
             pResults: &mut result,
         };
 
-        try!(check_errors(vk.QueuePresentKHR(queue.internal_object(), &infos)));
+        try!(check_errors(vk.QueuePresentKHR(*queue.internal_object_guard(), &infos)));
         try!(check_errors(result));
         Ok(())
     }
@@ -303,7 +304,7 @@ unsafe impl MemorySourceChunk for SwapchainAllocatedChunk {
     fn may_alias(&self) -> bool { false }
 
     #[inline]
-    unsafe fn gpu_access(&self, _: bool, _: ChunkRange, _: &mut Queue, _: Option<Arc<Fence>>,
+    unsafe fn gpu_access(&self, _: bool, _: ChunkRange, _: &Arc<Queue>, _: Option<Arc<Fence>>,
                          post_semaphore: Option<Arc<Semaphore>>) -> Option<Arc<Semaphore>>
     {
         assert!(post_semaphore.is_some());
@@ -13,11 +13,13 @@ use std::mem;
 use std::ptr;
 use std::sync::Arc;
+use std::sync::Mutex;
+use std::sync::MutexGuard;
 
 use device::Device;
 use device::Queue;
 use OomError;
 use Success;
+use SynchronizedVulkanObject;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -54,19 +56,17 @@ pub enum SharingMode {
     Concurrent(Vec<u32>),   // TODO: Vec is too expensive here
 }
 
-impl<'a> From<&'a Arc<Mutex<Queue>>> for SharingMode {
+impl<'a> From<&'a Arc<Queue>> for SharingMode {
     #[inline]
-    fn from(queue: &'a Arc<Mutex<Queue>>) -> SharingMode {
-        let queue = queue.lock().unwrap();      // TODO: meh
+    fn from(queue: &'a Arc<Queue>) -> SharingMode {
         SharingMode::Exclusive(queue.family().id())
     }
 }
 
-impl<'a> From<&'a [&'a Arc<Mutex<Queue>>]> for SharingMode {
+impl<'a> From<&'a [&'a Arc<Queue>]> for SharingMode {
     #[inline]
-    fn from(queues: &'a [&'a Arc<Mutex<Queue>>]) -> SharingMode {
+    fn from(queues: &'a [&'a Arc<Queue>]) -> SharingMode {
         SharingMode::Concurrent(queues.iter().map(|queue| {
-            let queue = queue.lock().unwrap();      // TODO: meh
             queue.family().id()
         }).collect())
     }
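The `SharingMode` conversions lose their `lock().unwrap()` calls entirely: reading the queue family only touches immutable fields of `Queue`, never the synchronized raw handle, so no lock is needed. A sketch of that design point, with an illustrative `Queue` whose `family()` returns a plain `u32` (vulkano's real `family()` returns a `QueueFamily`):

use std::sync::{Arc, Mutex};

// Illustrative: the mutable raw handle sits behind a Mutex, while the
// queue family id is plain immutable data.
#[allow(dead_code)]
struct Queue { raw: Mutex<u64>, family: u32 }

impl Queue {
    fn family(&self) -> u32 { self.family } // no lock needed
}

enum SharingMode { Exclusive(u32) }

impl<'a> From<&'a Arc<Queue>> for SharingMode {
    fn from(queue: &'a Arc<Queue>) -> SharingMode {
        // The old queue.lock().unwrap() disappears: reading the family
        // id never touches the synchronized handle.
        SharingMode::Exclusive(queue.family())
    }
}

fn main() {
    let queue = Arc::new(Queue { raw: Mutex::new(0), family: 1 });
    match SharingMode::from(&queue) {
        SharingMode::Exclusive(id) => println!("exclusive to family {}", id),
    }
}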
@@ -270,7 +270,7 @@ impl Drop for Semaphore {
 /// device loss.
 pub struct Event {
     device: Arc<Device>,
-    event: vk::Event,
+    event: Mutex<vk::Event>,
 }
 
 impl Event {
@@ -295,7 +295,7 @@ impl Event {
 
         Ok(Arc::new(Event {
             device: device.clone(),
-            event: event,
+            event: Mutex::new(event),
         }))
     }
 
@@ -304,8 +304,9 @@ impl Event {
     pub fn signaled(&self) -> Result<bool, OomError> {
         unsafe {
             let vk = self.device.pointers();
+            let event = self.event.lock().unwrap();
             let result = try!(check_errors(vk.GetEventStatus(self.device.internal_object(),
-                                                             self.event)));
+                                                             *event)));
             match result {
                 Success::EventSet => Ok(true),
                 Success::EventReset => Ok(false),
@@ -321,7 +322,8 @@ impl Event {
     pub fn set(&self) -> Result<(), OomError> {
         unsafe {
             let vk = self.device.pointers();
-            try!(check_errors(vk.SetEvent(self.device.internal_object(), self.event)).map(|_| ()));
+            let event = self.event.lock().unwrap();
+            try!(check_errors(vk.SetEvent(self.device.internal_object(), *event)).map(|_| ()));
             Ok(())
         }
     }
@@ -331,18 +333,19 @@ impl Event {
     pub fn reset(&self) -> Result<(), OomError> {
         unsafe {
             let vk = self.device.pointers();
-            try!(check_errors(vk.ResetEvent(self.device.internal_object(), self.event)).map(|_| ()));
+            let event = self.event.lock().unwrap();
+            try!(check_errors(vk.ResetEvent(self.device.internal_object(), *event)).map(|_| ()));
             Ok(())
         }
     }
 }
 
-unsafe impl VulkanObject for Event {
+unsafe impl SynchronizedVulkanObject for Event {
     type Object = vk::Event;
 
     #[inline]
-    fn internal_object(&self) -> vk::Event {
-        self.event
+    fn internal_object_guard(&self) -> MutexGuard<vk::Event> {
+        self.event.lock().unwrap()
     }
 }
 
@@ -351,7 +354,8 @@ impl Drop for Event {
     fn drop(&mut self) {
         unsafe {
             let vk = self.device.pointers();
-            vk.DestroyEvent(self.device.internal_object(), self.event, ptr::null());
+            let event = self.event.lock().unwrap();
+            vk.DestroyEvent(self.device.internal_object(), *event, ptr::null());
        }
    }
}