Mirror of https://github.com/vulkano-rs/vulkano.git (synced 2024-11-26 08:45:59 +00:00)
Introduce SafeDeref

parent dcc45b4dce
commit 105b5bd45e
@@ -77,7 +77,9 @@ pub mod sync;
 use std::error;
 use std::fmt;
 use std::mem;
+use std::ops::Deref;
 use std::path::Path;
+use std::sync::Arc;
 use std::sync::MutexGuard;
 
 mod vk {
@@ -109,6 +111,11 @@ lazy_static! {
     };
 }
 
+/// Alternative to the `Deref` trait. Contrary to `Deref`, must always return the same object.
+pub unsafe trait SafeDeref: Deref {}
+unsafe impl<'a, T: ?Sized> SafeDeref for &'a T {}
+unsafe impl<T: ?Sized> SafeDeref for Arc<T> {}
+
 /// Gives access to the internal identifier of an object.
 pub unsafe trait VulkanObject {
     /// The type of the object.
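The one-line doc comment above is the entire contract: `SafeDeref` is an unsafe marker trait, and implementing it promises that `deref()` returns the same object on every call. A plain `Deref` bound cannot promise this, because `deref` takes `&self` and interior mutability makes it legal to hand out a different target each time. A minimal sketch (editorial illustration, not part of this commit) of such a pathological but perfectly valid `Deref` impl:

    use std::cell::Cell;
    use std::ops::Deref;

    // Legal for `Deref`, fatal for unsafe code that expects a stable target.
    struct Flaky {
        first: String,
        second: String,
        flip: Cell<bool>,
    }

    impl Deref for Flaky {
        type Target = String;

        fn deref(&self) -> &String {
            // Alternate between two targets on every call.
            self.flip.set(!self.flip.get());
            if self.flip.get() { &self.first } else { &self.second }
        }
    }

Unsafe code that dereferences `D` once to create a Vulkan handle and again later to destroy it must see the same `Device` both times; writing `unsafe impl SafeDeref for Flaky {}` would therefore be unsound, which is exactly why the trait is `unsafe` to implement. The blanket impls for `&'a T` and `Arc<T>` are sound because both always point at one fixed object.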
@@ -19,6 +19,7 @@ use instance::MemoryType;
 use device::Device;
 use memory::Content;
 use OomError;
+use SafeDeref;
 use VulkanObject;
 use VulkanPointers;
 use check_errors;
@@ -26,15 +27,14 @@ use vk;
 
 /// Represents memory that has been allocated.
 #[derive(Debug)]
-pub struct DeviceMemory<D = Arc<Device>> where D: Deref<Target = Device> {
+pub struct DeviceMemory<D = Arc<Device>> where D: SafeDeref<Target = Device> {
     memory: vk::DeviceMemory,
     device: D,
-    device_raw: vk::Device,
     size: usize,
     memory_type_index: u32,
 }
 
-impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> DeviceMemory<D> where D: SafeDeref<Target = Device> {
     /// Allocates a chunk of memory from the device.
     ///
     /// Some platforms may have a limit on the maximum size of a single allocation. For example,
@@ -47,12 +47,10 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     ///
     // TODO: VK_ERROR_TOO_MANY_OBJECTS error
     #[inline]
-    pub fn alloc(device_ptr: &D, memory_type: &MemoryType, size: usize)
+    pub fn alloc(device: &D, memory_type: &MemoryType, size: usize)
                  -> Result<DeviceMemory<D>, OomError>
         where D: Clone
     {
-        let device: &Device = &**device_ptr;
-
         assert!(size >= 1);
         assert_eq!(device.physical_device().internal_object(),
                    memory_type.physical_device().internal_object());
@@ -79,8 +77,7 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
 
         Ok(DeviceMemory {
             memory: memory,
-            device: device_ptr.clone(),
-            device_raw: device.internal_object(),
+            device: device.clone(),
             size: size,
             memory_type_index: memory_type.id(),
         })
@@ -93,16 +90,14 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     /// - Panicks if `memory_type` doesn't belong to the same physical device as `device`.
     /// - Panicks if the memory type is not host-visible.
     ///
-    pub fn alloc_and_map(device_ptr: &D, memory_type: &MemoryType, size: usize)
+    pub fn alloc_and_map(device: &D, memory_type: &MemoryType, size: usize)
                          -> Result<MappedDeviceMemory<D>, OomError>
         where D: Clone
     {
-        let device: &Device = &**device_ptr;
-
         let vk = device.pointers();
 
         assert!(memory_type.is_host_visible());
-        let mem = try!(DeviceMemory::alloc(device_ptr, memory_type, size)); // FIXME: shouldn't pass device for safety
+        let mem = try!(DeviceMemory::alloc(device, memory_type, size));
 
         let coherent = memory_type.is_host_coherent();
 
@@ -124,9 +119,7 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     /// Returns the memory type this chunk was allocated on.
     #[inline]
     pub fn memory_type(&self) -> MemoryType {
-        let device: &Device = &*self.device;
-        assert_eq!(device.internal_object(), self.device_raw);
-        device.physical_device().memory_type_by_id(self.memory_type_index).unwrap()
+        self.device.physical_device().memory_type_by_id(self.memory_type_index).unwrap()
     }
 
     /// Returns the size in bytes of that memory chunk.
@@ -138,13 +131,11 @@ impl<D> DeviceMemory<D> where D: Deref<Target = Device> {
     /// Returns the device associated with this allocation.
     #[inline]
     pub fn device(&self) -> &Device {
-        let device: &Device = &*self.device;
-        assert_eq!(device.internal_object(), self.device_raw);
-        device
+        &self.device
    }
 }
 
-unsafe impl<D> VulkanObject for DeviceMemory<D> where D: Deref<Target = Device> {
+unsafe impl<D> VulkanObject for DeviceMemory<D> where D: SafeDeref<Target = Device> {
     type Object = vk::DeviceMemory;
 
     #[inline]
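The pattern deleted above and in the surrounding methods is worth spelling out: before this change, `DeviceMemory` stored a redundant raw `vk::Device` handle and re-asserted on every access that `&*self.device` still referred to it, as a runtime defense against `Deref` impls that change their target. The `SafeDeref` bound moves that guarantee into the type system, so both the `device_raw` field and the `assert_eq!` checks can go. A downstream crate that wants to plug in its own pointer type only has to uphold the contract; a hedged sketch with a hypothetical wrapper (`MyDevicePtr` is not a vulkano type, and `Device`/`SafeDeref` are assumed to be in scope):

    use std::ops::Deref;
    use std::sync::Arc;

    // Newtype around Arc<Device>; always derefs to the same allocation.
    pub struct MyDevicePtr(Arc<Device>);

    impl Deref for MyDevicePtr {
        type Target = Device;

        #[inline]
        fn deref(&self) -> &Device {
            &self.0
        }
    }

    // Sound: the impl above can never return a different object.
    unsafe impl SafeDeref for MyDevicePtr {}

With that marker impl, `DeviceMemory<MyDevicePtr>` (and likewise `Fence<MyDevicePtr>`) works without any raw-handle bookkeeping.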
@@ -153,7 +144,7 @@ unsafe impl<D> VulkanObject for DeviceMemory<D> where D: Deref<Target = Device>
     }
 }
 
-impl<D> Drop for DeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> Drop for DeviceMemory<D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
@@ -166,13 +157,13 @@ impl<D> Drop for DeviceMemory<D> where D: Deref<Target = Device> {
 
 /// Represents memory that has been allocated and mapped in CPU accessible space.
 #[derive(Debug)]
-pub struct MappedDeviceMemory<D = Arc<Device>> where D: Deref<Target = Device> {
+pub struct MappedDeviceMemory<D = Arc<Device>> where D: SafeDeref<Target = Device> {
     memory: DeviceMemory<D>,
     pointer: *mut c_void,
     coherent: bool,
 }
 
-impl<D> MappedDeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
     /// Returns the underlying `DeviceMemory`.
     // TODO: impl AsRef instead
     #[inline]
@@ -219,10 +210,10 @@ impl<D> MappedDeviceMemory<D> where D: Deref<Target = Device> {
     }
 }
 
-unsafe impl<D> Send for MappedDeviceMemory<D> where D: Deref<Target = Device> {}
-unsafe impl<D> Sync for MappedDeviceMemory<D> where D: Deref<Target = Device> {}
+unsafe impl<D> Send for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {}
+unsafe impl<D> Sync for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {}
 
-impl<D> Drop for MappedDeviceMemory<D> where D: Deref<Target = Device> {
+impl<D> Drop for MappedDeviceMemory<D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
@@ -234,14 +225,14 @@ impl<D> Drop for MappedDeviceMemory<D> where D: Deref<Target = Device> {
 }
 
 /// Object that can be used to read or write the content of a `MappedDeviceMemory`.
-pub struct CpuAccess<'a, T: ?Sized + 'a, D = Arc<Device>> where D: Deref<Target = Device> + 'a {
+pub struct CpuAccess<'a, T: ?Sized + 'a, D = Arc<Device>> where D: SafeDeref<Target = Device> + 'a {
     pointer: *mut T,
     mem: &'a MappedDeviceMemory<D>,
     coherent: bool,
     range: Range<usize>,
 }
 
-impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     /// Makes a new `CpuAccess` to access a sub-part of the current `CpuAccess`.
     #[inline]
     pub fn map<U: ?Sized + 'a, F>(self, f: F) -> CpuAccess<'a, U, D>
@@ -256,10 +247,10 @@ impl<'a, T: ?Sized + 'a, D: 'a> CpuAccess<'a, T, D> where D: Deref<Target = Devi
     }
 }
 
-unsafe impl<'a, T: ?Sized + 'a, D: 'a> Send for CpuAccess<'a, T, D> where D: Deref<Target = Device> {}
-unsafe impl<'a, T: ?Sized + 'a, D: 'a> Sync for CpuAccess<'a, T, D> where D: Deref<Target = Device> {}
+unsafe impl<'a, T: ?Sized + 'a, D: 'a> Send for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {}
+unsafe impl<'a, T: ?Sized + 'a, D: 'a> Sync for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {}
 
-impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     type Target = T;
 
     #[inline]
@@ -268,14 +259,14 @@ impl<'a, T: ?Sized + 'a, D: 'a> Deref for CpuAccess<'a, T, D> where D: Deref<Tar
     }
 }
 
-impl<'a, T: ?Sized + 'a, D: 'a> DerefMut for CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> DerefMut for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn deref_mut(&mut self) -> &mut T {
         unsafe { &mut *self.pointer }
     }
 }
 
-impl<'a, T: ?Sized + 'a, D: 'a> Drop for CpuAccess<'a, T, D> where D: Deref<Target = Device> {
+impl<'a, T: ?Sized + 'a, D: 'a> Drop for CpuAccess<'a, T, D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         // If the memory doesn't have the `coherent` flag, we need to flush the data.
@@ -10,7 +10,6 @@
 use std::error;
 use std::fmt;
 use std::mem;
-use std::ops::Deref;
 use std::ptr;
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;
@@ -21,6 +20,7 @@ use smallvec::SmallVec;
 use device::Device;
 use Error;
 use OomError;
+use SafeDeref;
 use Success;
 use VulkanObject;
 use VulkanPointers;
@@ -33,11 +33,10 @@ use vk;
 /// the same ressource simultaneously (except for concurrent reads). Therefore in order to know
 /// when the CPU can access a ressource again, a fence has to be used.
 #[derive(Debug)]
-pub struct Fence<D = Arc<Device>> where D: Deref<Target = Device> {
+pub struct Fence<D = Arc<Device>> where D: SafeDeref<Target = Device> {
     fence: vk::Fence,
 
     device: D,
-    device_raw: vk::Device,
 
     // If true, we know that the `Fence` is signaled. If false, we don't know.
     // This variable exists so that we don't need to call `vkGetFenceStatus` or `vkWaitForFences`
@@ -45,7 +44,7 @@ pub struct Fence<D = Arc<Device>> where D: Deref<Target = Device> {
     signaled: AtomicBool,
 }
 
-impl<D> Fence<D> where D: Deref<Target = Device> {
+impl<D> Fence<D> where D: SafeDeref<Target = Device> {
     /// See the docs of new().
     #[inline]
     pub fn raw(device: &D) -> Result<Fence<D>, OomError>
@@ -88,10 +87,9 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
         Arc::new(Fence::signaled_raw(device).unwrap())
     }
 
-    fn new_impl(device_ptr: &D, signaled: bool) -> Result<Fence<D>, OomError>
+    fn new_impl(device: &D, signaled: bool) -> Result<Fence<D>, OomError>
         where D: Clone
     {
-        let device: &Device = &*device_ptr;
         let vk = device.pointers();
 
         let fence = unsafe {
@@ -109,8 +107,7 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
 
         Ok(Fence {
             fence: fence,
-            device: device_ptr.clone(),
-            device_raw: device.internal_object(),
+            device: device.clone(),
             signaled: AtomicBool::new(signaled),
         })
     }
@@ -119,13 +116,10 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     #[inline]
     pub fn ready(&self) -> Result<bool, OomError> {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-
             if self.signaled.load(Ordering::Relaxed) { return Ok(true); }
 
-            let vk = device.pointers();
-            let result = try!(check_errors(vk.GetFenceStatus(device.internal_object(),
+            let vk = self.device.pointers();
+            let result = try!(check_errors(vk.GetFenceStatus(self.device.internal_object(),
                                                              self.fence)));
             match result {
                 Success::Success => {
@@ -144,16 +138,13 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     /// Returns `Ok` if the fence is now signaled. Returns `Err` if the timeout was reached instead.
     pub fn wait(&self, timeout: Duration) -> Result<(), FenceWaitError> {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-
             if self.signaled.load(Ordering::Relaxed) { return Ok(()); }
 
             let timeout_ns = timeout.as_secs().saturating_mul(1_000_000_000)
                                     .saturating_add(timeout.subsec_nanos() as u64);
 
-            let vk = device.pointers();
-            let r = try!(check_errors(vk.WaitForFences(device.internal_object(), 1,
+            let vk = self.device.pointers();
+            let r = try!(check_errors(vk.WaitForFences(self.device.internal_object(), 1,
                                                        &self.fence, vk::TRUE, timeout_ns)));
 
             match r {
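One unchanged context line in `wait` deserves a note: the `Duration` is converted to Vulkan's `u64` nanosecond timeout with saturating arithmetic, so an enormous duration clamps to `u64::MAX` (which Vulkan treats as an effectively unbounded wait) instead of wrapping around to a tiny value. The same computation in isolation, as a small self-contained sketch:

    use std::time::Duration;

    fn timeout_ns(timeout: Duration) -> u64 {
        timeout.as_secs()
               .saturating_mul(1_000_000_000)
               .saturating_add(timeout.subsec_nanos() as u64)
    }

    fn main() {
        // 2 s plus half a second's worth of nanoseconds:
        assert_eq!(timeout_ns(Duration::new(2, 500_000_000)), 2_500_000_000);
        // Saturates instead of overflowing:
        assert_eq!(timeout_ns(Duration::new(u64::MAX, 0)), u64::MAX);
    }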
@@ -219,11 +210,8 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     #[inline]
     pub fn reset(&self) {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-
-            let vk = device.pointers();
-            vk.ResetFences(device.internal_object(), 1, &self.fence);
+            let vk = self.device.pointers();
+            vk.ResetFences(self.device.internal_object(), 1, &self.fence);
             self.signaled.store(false, Ordering::Relaxed);
         }
     }
@@ -258,7 +246,7 @@ impl<D> Fence<D> where D: Deref<Target = Device> {
     }
 }
 
-unsafe impl<D> VulkanObject for Fence<D> where D: Deref<Target = Device> {
+unsafe impl<D> VulkanObject for Fence<D> where D: SafeDeref<Target = Device> {
     type Object = vk::Fence;
 
     #[inline]
@@ -267,15 +255,12 @@ unsafe impl<D> VulkanObject for Fence<D> where D: Deref<Target = Device> {
     }
 }
 
-impl<D> Drop for Fence<D> where D: Deref<Target = Device> {
+impl<D> Drop for Fence<D> where D: SafeDeref<Target = Device> {
     #[inline]
     fn drop(&mut self) {
         unsafe {
-            let device: &Device = &*self.device;
-            assert_eq!(device.internal_object(), self.device_raw);
-
-            let vk = device.pointers();
-            vk.DestroyFence(device.internal_object(), self.fence, ptr::null());
+            let vk = self.device.pointers();
+            vk.DestroyFence(self.device.internal_object(), self.fence, ptr::null());
         }
     }
 }
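Taken together, the new generics read naturally from the caller's side. A hedged usage sketch (not from the commit; assumes a `device: Arc<Device>` and a `mem_type: MemoryType` already exist, and unwraps errors for brevity):

    // `D` defaults to `Arc<Device>`, so existing owning callers are unchanged:
    let owned: DeviceMemory = DeviceMemory::alloc(&device, &mem_type, 1024).unwrap();

    // `&Device` is also `SafeDeref<Target = Device>`, so a non-owning handle
    // works too; the allocation must then not outlive `device`:
    let device_ref: &Device = &device;
    let borrowed: DeviceMemory<&Device> = DeviceMemory::alloc(&device_ref, &mem_type, 1024).unwrap();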