Introduce SafeDeref

Pierre Krieger 2016-04-22 13:10:19 +02:00
parent dcc45b4dce
commit 105b5bd45e
3 changed files with 45 additions and 62 deletions

View File

@@ -77,7 +77,9 @@ pub mod sync;
 use std::error;
 use std::fmt;
 use std::mem;
+use std::ops::Deref;
 use std::path::Path;
+use std::sync::Arc;
 use std::sync::MutexGuard;

 mod vk {
@@ -109,6 +111,11 @@ lazy_static! {
     };
 }

+/// Alternative to the `Deref` trait. Contrary to `Deref`, must always return the same object.
+pub unsafe trait SafeDeref: Deref {}
+unsafe impl<'a, T: ?Sized> SafeDeref for &'a T {}
+unsafe impl<T: ?Sized> SafeDeref for Arc<T> {}
+
 /// Gives access to the internal identifier of an object.
 pub unsafe trait VulkanObject {
     /// The type of the object.
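
Note on the new trait: a plain `Deref` implementation is allowed to return a different object on every call, which would let a wrapper type swap the underlying `Device` out from under an object that cached state derived from it. `SafeDeref` is the `unsafe` promise that this never happens, which is why only known-stable pointers (`&T`, `Arc<T>`) implement it here. A minimal sketch of a custom pointer opting in (the trait is repeated so the sketch stands alone; the `StaticRef` type is hypothetical and not part of this commit):

    use std::ops::Deref;

    /// Alternative to the `Deref` trait. Contrary to `Deref`, must always return the same object.
    pub unsafe trait SafeDeref: Deref {}

    /// Hypothetical wrapper around a `'static` reference.
    struct StaticRef<T: 'static>(&'static T);

    impl<T: 'static> Deref for StaticRef<T> {
        type Target = T;
        // Always returns the same &'static T, so the stability contract holds.
        fn deref(&self) -> &T { self.0 }
    }

    // The impl is `unsafe` because the compiler cannot verify the stability
    // promise; the author of the type asserts it.
    unsafe impl<T: 'static> SafeDeref for StaticRef<T> {}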

View File

@@ -19,6 +19,7 @@ use instance::MemoryType;
 use device::Device;
 use memory::Content;
 use OomError;
+use SafeDeref;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -26,15 +27,14 @@ use vk;
 /// Represents memory that has been allocated.
 #[derive(Debug)]
-pub struct DeviceMemory<D = Arc<Device>> where D: Deref<Target = Device> {
+pub struct DeviceMemory<D = Arc<Device>> where D: SafeDeref<Target = Device> {
     memory: vk::DeviceMemory,
     device: D,
-    device_raw: vk::Device,
     size: usize,
     memory_type_index: u32,
 }

-impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
     /// Allocates a chunk of memory from the device.
     ///
     /// Some platforms may have a limit on the maximum size of a single allocation. For example,
@@ -47,12 +47,10 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     ///
     // TODO: VK_ERROR_TOO_MANY_OBJECTS error
     #[inline]
-    pub fn alloc(device_ptr: &D, memory_type: &MemoryType, size: usize)
+    pub fn alloc(device: &D, memory_type: &MemoryType, size: usize)
                  -> Result<DeviceMemory<D>, OomError>
         where D: Clone
     {
-        let device: &Device = &**device_ptr;
-
         assert!(size >= 1);
         assert_eq!(device.physical_device().internal_object(),
                    memory_type.physical_device().internal_object());
@@ -79,8 +77,7 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
         Ok(DeviceMemory {
             memory: memory,
-            device: device_ptr.clone(),
-            device_raw: device.internal_object(),
+            device: device.clone(),
             size: size,
             memory_type_index: memory_type.id(),
         })
@@ -93,16 +90,14 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     /// - Panicks if `memory_type` doesn't belong to the same physical device as `device`.
     /// - Panicks if the memory type is not host-visible.
     ///
-    pub fn alloc_and_map(device_ptr: &D, memory_type: &MemoryType, size: usize)
+    pub fn alloc_and_map(device: &D, memory_type: &MemoryType, size: usize)
                          -> Result<MappedDeviceMemory<D>, OomError>
         where D: Clone
     {
-        let device: &Device = &**device_ptr;
-
         let vk = device.pointers();

         assert!(memory_type.is_host_visible());
-        let mem = try!(DeviceMemory::alloc(device_ptr, memory_type, size));    // FIXME: shouldn't pass device for safety
+        let mem = try!(DeviceMemory::alloc(device, memory_type, size));

        let coherent = memory_type.is_host_coherent();
@@ -124,9 +119,7 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     /// Returns the memory type this chunk was allocated on.
     #[inline]
     pub fn memory_type(&self) -> MemoryType {
-        let device: &Device = &*self.device;
-        assert_eq!(device.internal_object(), self.device_raw);
-        device.physical_device().memory_type_by_id(self.memory_type_index).unwrap()
+        self.device.physical_device().memory_type_by_id(self.memory_type_index).unwrap()
     }

     /// Returns the size in bytes of that memory chunk.
@@ -138,13 +131,11 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     /// Returns the device associated with this allocation.
     #[inline]
     pub fn device(&self) -> &Device {
-        let device: &Device = &*self.device;
-        assert_eq!(device.internal_object(), self.device_raw);
-        device
+        &self.device
     }
 }

-unsafe impl<D> VulkanObject for DeviceMemory<D> where D: Deref<Target = Device> {
+unsafe impl<D> VulkanObject for DeviceMemory<D> where D: SafeDeref<Target = Device> {
     type Object = vk::DeviceMemory;

     #[inline]
@@ -153,7 +144,7 @@ unsafe impl<D> VulkanObject for DeviceMemory<D> where D: Deref<Target = Device>
     }
 }

-impl<D> Drop for DeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> Drop for DeviceMemory<D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
@@ -166,13 +157,13 @@ impl<D> Drop for DeviceMemory<D> where D: Deref<Target = Device> {
 /// Represents memory that has been allocated and mapped in CPU accessible space.
 #[derive(Debug)]
-pub struct MappedDeviceMemory<D = Arc<Device>> where D: Deref<Target = Device> {
+pub struct MappedDeviceMemory<D = Arc<Device>> where D: SafeDeref<Target = Device> {
     memory: DeviceMemory<D>,
     pointer: *mut c_void,
     coherent: bool,
 }

-impl<D> MappedDeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
     /// Returns the underlying `DeviceMemory`.
     // TODO: impl AsRef instead
     #[inline]
@@ -219,10 +210,10 @@ impl<D> MappedDeviceMemory<D> where D: Deref<Target = Device> {
     }
 }

-unsafe impl<D> Send for MappedDeviceMemory<D> where D: Deref<Target = Device> {}
-unsafe impl<D> Sync for MappedDeviceMemory<D> where D: Deref<Target = Device> {}
+unsafe impl<D> Send for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {}
+unsafe impl<D> Sync for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {}

-impl<D> Drop for MappedDeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> Drop for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
@@ -234,14 +225,14 @@ impl<D> Drop for MappedDeviceMemory<D> where D: Deref<Target = Device> {
 }

 /// Object that can be used to read or write the content of a `MappedDeviceMemory`.
-pub struct CpuAccess<'a, T: ?Sized + 'a, D = Arc<Device>> where D: Deref<Target = Device> + 'a {
+pub struct CpuAccess<'a, T: ?Sized + 'a, D = Arc<Device>> where D: SafeDeref<Target = Device> + 'a {
     pointer: *mut T,
     mem: &'a MappedDeviceMemory<D>,
     coherent: bool,
     range: Range<usize>,
 }

-impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     /// Makes a new `CpuAccess` to access a sub-part of the current `CpuAccess`.
     #[inline]
     pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U, D>
@@ -256,10 +247,10 @@ impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: Deref<Target = Devi
     }
 }

-unsafe impl<'a, T: ?Sized + 'a, D: 'a> Send for CpuAccess<'a, T, D> where D: Deref<Target = Device> {}
-unsafe impl<'a, T: ?Sized + 'a, D: 'a> Sync for CpuAccess<'a, T, D> where D: Deref<Target = Device> {}
+unsafe impl<'a, T: ?Sized + 'a, D: 'a> Send for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {}
+unsafe impl<'a, T: ?Sized + 'a, D: 'a> Sync for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {}

-impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     type Target = T;

     #[inline]
@@ -268,14 +259,14 @@ impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: Deref<Tar
     }
 }

-impl<'a, T: ?Sized + 'a, D: 'a> DerefMut for CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> DerefMut for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn deref_mut(&mut self) -> &mut T {
         unsafe { &mut *self.pointer }
     }
 }

-impl<'a, T: ?Sized + 'a, D: 'a> Drop for CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> Drop for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         // If the memory doesn't have the `coherent` flag, we need to flush the data.
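
Note on the effect in this file: because `D: SafeDeref<Target = Device>` guarantees a stable target, `DeviceMemory` no longer needs to cache a `device_raw` copy of the handle and re-assert it on every access. A hedged usage sketch of the resulting flexibility, written as if inside this module (module paths assumed from the `use` lines above; both helper functions are hypothetical):

    use std::sync::Arc;
    use device::Device;
    use instance::MemoryType;
    use OomError;

    // Owning pointer: the allocation keeps the device alive through the Arc clone.
    fn alloc_owned(device: &Arc<Device>, ty: &MemoryType)
                   -> Result<DeviceMemory<Arc<Device>>, OomError>
    {
        DeviceMemory::alloc(device, ty, 1024)
    }

    // Borrowed pointer: no refcount traffic, provided the caller guarantees the
    // device outlives the returned allocation.
    fn alloc_borrowed<'a>(device: &&'a Device, ty: &MemoryType)
                          -> Result<DeviceMemory<&'a Device>, OomError>
    {
        DeviceMemory::alloc(device, ty, 1024)
    }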

View File

@@ -10,7 +10,6 @@
 use std::error;
 use std::fmt;
 use std::mem;
-use std::ops::Deref;
 use std::ptr;
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
@@ -21,6 +20,7 @@ use smallvec::SmallVec;
 use device::Device;
 use Error;
 use OomError;
+use SafeDeref;
 use Success;
 use VulkanObject;
 use VulkanPointers;
@@ -33,11 +33,10 @@ use vk;
 /// the same ressource simultaneously (except for concurrent reads). Therefore in order to know
 /// when the CPU can access a ressource again, a fence has to be used.
 #[derive(Debug)]
-pub struct Fence<D = Arc<Device>> where D: Deref<Target = Device> {
+pub struct Fence<D = Arc<Device>> where D: SafeDeref<Target = Device> {
     fence: vk::Fence,
     device: D,
-    device_raw: vk::Device,

     // If true, we know that the `Fence` is signaled. If false, we don't know.
     // This variable exists so that we don't need to call `vkGetFenceStatus` or `vkWaitForFences`
@@ -45,7 +44,7 @@ pub struct Fence<D = Arc<Device>> where D: Deref<Target = Device> {
     signaled: AtomicBool,
 }

-impl<D> Fence<D> where D: Deref<Target = Device> {
+impl<D> Fence<D> where D: SafeDeref<Target = Device> {
     /// See the docs of new().
     #[inline]
     pub fn raw(device: &D) -> Result<Fence<D>, OomError>
@@ -88,10 +87,9 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
         Arc::new(Fence::signaled_raw(device).unwrap())
     }

-    fn new_impl(device_ptr: &D, signaled: bool) -> Result<Fence<D>, OomError>
+    fn new_impl(device: &D, signaled: bool) -> Result<Fence<D>, OomError>
         where D: Clone
     {
-        let device: &Device = &*device_ptr;
         let vk = device.pointers();

         let fence = unsafe {
@@ -109,8 +107,7 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
         Ok(Fence {
             fence: fence,
-            device: device_ptr.clone(),
-            device_raw: device.internal_object(),
+            device: device.clone(),
             signaled: AtomicBool::new(signaled),
         })
     }
@@ -119,13 +116,10 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     #[inline]
     pub fn ready(&self) -> Result<bool, OomError> {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-
             if self.signaled.load(Ordering::Relaxed) { return Ok(true); }

-            let vk = device.pointers();
-            let result = try!(check_errors(vk.GetFenceStatus(device.internal_object(),
+            let vk = self.device.pointers();
+            let result = try!(check_errors(vk.GetFenceStatus(self.device.internal_object(),
                                                              self.fence)));
             match result {
                 Success::Success => {
@@ -144,16 +138,13 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     /// Returns `Ok` if the fence is now signaled. Returns `Err` if the timeout was reached instead.
     pub fn wait(&self, timeout: Duration) -> Result<(), FenceWaitError> {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-
             if self.signaled.load(Ordering::Relaxed) { return Ok(()); }

             let timeout_ns = timeout.as_secs().saturating_mul(1_000_000_000)
                                               .saturating_add(timeout.subsec_nanos() as u64);

-            let vk = device.pointers();
-            let r = try!(check_errors(vk.WaitForFences(device.internal_object(), 1,
+            let vk = self.device.pointers();
+            let r = try!(check_errors(vk.WaitForFences(self.device.internal_object(), 1,
                                                        &self.fence, vk::TRUE, timeout_ns)));

             match r {
@@ -219,11 +210,8 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     #[inline]
     pub fn reset(&self) {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-            let vk = device.pointers();
-            vk.ResetFences(device.internal_object(), 1, &self.fence);
+            let vk = self.device.pointers();
+            vk.ResetFences(self.device.internal_object(), 1, &self.fence);
             self.signaled.store(false, Ordering::Relaxed);
         }
     }
@@ -258,7 +246,7 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     }
 }

-unsafe impl<D> VulkanObject for Fence<D> where D: Deref<Target = Device> {
+unsafe impl<D> VulkanObject for Fence<D> where D: SafeDeref<Target = Device> {
     type Object = vk::Fence;

     #[inline]
@@ -267,15 +255,12 @@ unsafe impl<D> VulkanObject for Fence<D> where D: Deref<Target = Device> {
     }
 }

-impl<D> Drop for Fence<D> where D: Deref<Target = Device> {
+impl<D> Drop for Fence<D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-            let vk = device.pointers();
-            vk.DestroyFence(device.internal_object(), self.fence, ptr::null());
+            let vk = self.device.pointers();
+            vk.DestroyFence(self.device.internal_object(), self.fence, ptr::null());
         }
     }
 }
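
Note on why the removed asserts existed at all: with a plain `Deref` bound, a pathological pointer type could return a different `Device` on each call, so `Fence` had to cache `device_raw` and compare it before every raw Vulkan call. `SafeDeref` moves that burden onto the pointer type, which is why `ready`, `wait`, `reset`, and `drop` can now trust `self.device` unconditionally. A contrived counterexample of a type that must never implement `SafeDeref` (hypothetical, for illustration only):

    use std::ops::Deref;
    use std::sync::atomic::{AtomicBool, Ordering};

    struct Flaky<T> {
        a: T,
        b: T,
        flip: AtomicBool,
    }

    impl<T> Deref for Flaky<T> {
        type Target = T;
        fn deref(&self) -> &T {
            // Legal for `Deref`, fatal for `Fence`: each call may yield a
            // different object, so any cached raw handle could go stale.
            if self.flip.fetch_xor(true, Ordering::Relaxed) { &self.a } else { &self.b }
        }
    }

    // No `unsafe impl SafeDeref for Flaky<T>`: the stability contract cannot be upheld.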