Merge `MappedDeviceMemory` into `DeviceMemory`, make `MemoryAlloc` reuse the logic (#2300)

* Merge `MappedDeviceMemory` into `DeviceMemory`
* Fix soundness and utilize new `DeviceMemory` methods in `MemoryAlloc`
* Fix oopsies
* `Sync`ness
* `#[inline]`
* Big oopsie
* Language
* Sanity check for the deprecated stuff
* Full `khr_map_memory2`
* Missed trait impls
* `MemoryUnmapInfo::validate`
* Document mapping behavior of `GenericMemoryAllocator`
* Validate flags
* Update vulkano/src/memory/allocator/mod.rs

Co-authored-by: Rua <ruawhitepaw@gmail.com>

* Remove flags
* Errors

---------

Co-authored-by: Rua <ruawhitepaw@gmail.com>
parent 015c057fa1
commit de7b6a91a0
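
Taken together, the change replaces the standalone `MappedDeviceMemory` wrapper with mapping methods and queryable state on `DeviceMemory` itself. A rough sketch of the new surface, using only names that appear in the diff below (the `..Default::default()` construction of `MemoryMapInfo` and the pre-existing `device`/`memory_type_index` bindings are assumptions, and error handling is elided):

    use vulkano::memory::{DeviceMemory, MemoryAllocateInfo, MemoryMapInfo};

    let mut memory = DeviceMemory::allocate(
        device.clone(),
        MemoryAllocateInfo {
            allocation_size: 1024,
            memory_type_index,
            ..Default::default()
        },
    )?;

    // Mapping is now a method on `DeviceMemory` instead of a wrapper type, and
    // the resulting state is queryable afterwards.
    memory.map(MemoryMapInfo {
        offset: 0,
        size: memory.allocation_size(),
        ..Default::default()
    })?;
    assert!(memory.mapping_state().is_some());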
@@ -159,7 +159,7 @@ mod linux {
         let image = Arc::new(
             raw_image
-                .bind_memory([MemoryAlloc::new(image_memory).unwrap()])
+                .bind_memory([MemoryAlloc::new(image_memory)])
                 .map_err(|(err, _, _)| err)
                 .unwrap(),
         );
@@ -73,7 +73,7 @@ pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result<TokenStream> {
         const LAYOUT: ::#crate_ident::buffer::BufferContentsLayout = #layout;

         #[inline(always)]
-        unsafe fn from_ffi(data: *mut ::std::ffi::c_void, range: usize) -> *mut Self {
+        unsafe fn ptr_from_slice(slice: ::std::ptr::NonNull<[u8]>) -> *mut Self {
             #[repr(C)]
             union PtrRepr<T: ?Sized> {
                 components: PtrComponents,
@@ -83,14 +83,11 @@ pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result<TokenStream> {
             #[derive(Clone, Copy)]
             #[repr(C)]
             struct PtrComponents {
-                data: *mut ::std::ffi::c_void,
+                data: *mut u8,
                 len: usize,
             }

-            let alignment = <Self as ::#crate_ident::buffer::BufferContents>::LAYOUT
-                .alignment()
-                .as_devicesize() as usize;
-            ::std::debug_assert!(data as usize % alignment == 0);
+            let data = <*mut [u8]>::cast::<u8>(slice.as_ptr());

             let head_size = <Self as ::#crate_ident::buffer::BufferContents>::LAYOUT
                 .head_size() as usize;
@@ -98,8 +95,8 @@ pub fn derive_buffer_contents(mut ast: DeriveInput) -> Result<TokenStream> {
                 .element_size()
                 .unwrap_or(1) as usize;

-            ::std::debug_assert!(range >= head_size);
-            let tail_size = range - head_size;
+            ::std::debug_assert!(slice.len() >= head_size);
+            let tail_size = slice.len() - head_size;
             ::std::debug_assert!(tail_size % element_size == 0);
             let len = tail_size / element_size;

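To make the head/tail arithmetic above concrete, here is the same computation with numbers plugged in for a hypothetical DST whose head is 8 bytes and whose tail elements are 4 bytes each (a plain illustration, not generated code):

    // For a 24-byte mapped slice:
    let head_size = 8usize;     // size of the sized part of the DST
    let element_size = 4usize;  // size of one tail element
    let slice_len = 24usize;    // slice.len()

    debug_assert!(slice_len >= head_size);
    let tail_size = slice_len - head_size;      // 16
    debug_assert!(tail_size % element_size == 0);
    let len = tail_size / element_size;         // the DST's slice metadata: 4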
@@ -16,7 +16,7 @@ use crate::{
     memory::{
         self,
         allocator::{align_down, align_up, DeviceLayout},
-        is_aligned, DeviceAlignment,
+        is_aligned, DeviceAlignment, MappedMemoryRange,
     },
     sync::HostAccessError,
     DeviceSize, NonZeroDeviceSize, ValidationError,
@@ -25,7 +25,6 @@ use bytemuck::AnyBitPattern;
 use std::{
     alloc::Layout,
     cmp,
-    ffi::c_void,
     hash::{Hash, Hasher},
     marker::PhantomData,
     mem::{self, align_of, size_of},
@@ -119,16 +118,23 @@ impl<T: ?Sized> Subbuffer<T> {
         }
     }

-    /// Returns the mapped pointer to the start of the subbuffer if the memory is host-visible,
-    /// otherwise returns [`None`].
-    pub fn mapped_ptr(&self) -> Option<NonNull<c_void>> {
+    /// Returns the mapped pointer to the range of memory of `self`.
+    ///
+    /// The subbuffer must fall within the range of the memory mapping given to
+    /// [`DeviceMemory::map`].
+    ///
+    /// See [`MappingState::slice`] for the safety invariants of the returned pointer.
+    ///
+    /// [`DeviceMemory::map`]: crate::memory::DeviceMemory::map
+    /// [`MappingState::slice`]: crate::memory::MappingState::slice
+    pub fn mapped_slice(&self) -> Result<NonNull<[u8]>, HostAccessError> {
         match self.buffer().memory() {
-            BufferMemory::Normal(a) => a.mapped_ptr().map(|ptr| {
-                // SAFETY: The original address came from the Vulkan implementation, and allocation
-                // sizes are guaranteed to not exceed `isize::MAX` when there's a mapped pointer,
-                // so the offset better be in range.
-                unsafe { NonNull::new_unchecked(ptr.as_ptr().add(self.offset as usize)) }
-            }),
+            BufferMemory::Normal(a) => {
+                let opt = a.mapped_slice(self.range());
+
+                // SAFETY: `self.range()` is in bounds of the allocation.
+                unsafe { opt.unwrap_unchecked() }
+            }
             BufferMemory::Sparse => unreachable!(),
         }
     }
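A small usage sketch of the reworked accessor (the `Subbuffer<[u8]>` parameter is an assumption): where `mapped_ptr` returned a bare `Option`, `mapped_slice` reports why host access failed.

    use std::ptr::NonNull;
    use vulkano::{buffer::Subbuffer, sync::HostAccessError};

    fn try_view(subbuffer: &Subbuffer<[u8]>) -> Result<NonNull<[u8]>, HostAccessError> {
        // Errors now distinguish "not host-mapped" from "outside the mapped
        // range" instead of collapsing both into `None`.
        subbuffer.mapped_slice()
    }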
@@ -327,27 +333,33 @@ where
             .map_err(HostAccessError::AccessConflict)?;
         unsafe { state.cpu_read_lock(range.clone()) };

+        let mapped_slice = self.mapped_slice()?;
+
         if allocation.atom_size().is_some() {
-            // If there are other read locks being held at this point, they also called
-            // `invalidate_range` when locking. The GPU can't write data while the CPU holds a read
-            // lock, so there will be no new data and this call will do nothing.
-            // TODO: probably still more efficient to call it only if we're the first to acquire a
-            // read lock, but the number of CPU locks isn't currently tracked anywhere.
-            unsafe {
-                allocation
-                    .invalidate_range(range.clone())
-                    .map_err(HostAccessError::Invalidate)?
-            };
+            let memory_range = MappedMemoryRange {
+                offset: range.start,
+                size: range.end - range.start,
+                _ne: crate::NonExhaustive(()),
+            };
+
+            // If there are other read locks being held at this point, they also called
+            // `invalidate_range_unchecked` when locking. The device can't write data while the
+            // host holds a read lock, so there will be no new data and this call will do nothing.
+            // TODO: probably still more efficient to call it only if we're the first to acquire a
+            // read lock, but the number of host locks isn't currently tracked anywhere.
+            //
+            // SAFETY:
+            // - `self.mapped_slice()` didn't return an error, which means that the subbuffer falls
+            //   within the mapped range of the memory.
+            // - We ensure that memory mappings are always aligned to the non-coherent atom size
+            //   for non-host-coherent memory, therefore the subbuffer's range aligned to the
+            //   non-coherent atom size must fall within the mapped range of the memory.
+            unsafe { allocation.invalidate_range_unchecked(memory_range) }
+                .map_err(HostAccessError::Invalidate)?;
         }

-        let mapped_ptr = self.mapped_ptr().ok_or_else(|| {
-            HostAccessError::ValidationError(Box::new(ValidationError {
-                problem: "the memory of this subbuffer is not host-visible".into(),
-                ..Default::default()
-            }))
-        })?;
-
         // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
-        let data = unsafe { &*T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) };
+        let data = unsafe { &*T::ptr_from_slice(mapped_slice) };

         Ok(BufferReadGuard {
             subbuffer: self,
@@ -407,22 +419,27 @@ where
             .map_err(HostAccessError::AccessConflict)?;
         unsafe { state.cpu_write_lock(range.clone()) };

+        let mapped_slice = self.mapped_slice()?;
+
         if allocation.atom_size().is_some() {
-            unsafe {
-                allocation
-                    .invalidate_range(range.clone())
-                    .map_err(HostAccessError::Invalidate)?
-            };
+            let memory_range = MappedMemoryRange {
+                offset: range.start,
+                size: range.end - range.start,
+                _ne: crate::NonExhaustive(()),
+            };
+
+            // SAFETY:
+            // - `self.mapped_slice()` didn't return an error, which means that the subbuffer falls
+            //   within the mapped range of the memory.
+            // - We ensure that memory mappings are always aligned to the non-coherent atom size
+            //   for non-host-coherent memory, therefore the subbuffer's range aligned to the
+            //   non-coherent atom size must fall within the mapped range of the memory.
+            unsafe { allocation.invalidate_range_unchecked(memory_range) }
+                .map_err(HostAccessError::Invalidate)?;
         }

-        let mapped_ptr = self.mapped_ptr().ok_or_else(|| {
-            HostAccessError::ValidationError(Box::new(ValidationError {
-                problem: "the memory of this subbuffer is not host-visible".into(),
-                ..Default::default()
-            }))
-        })?;
-
         // SAFETY: `Subbuffer` guarantees that its contents are laid out correctly for `T`.
-        let data = unsafe { &mut *T::from_ffi(mapped_ptr.as_ptr(), self.size as usize) };
+        let data = unsafe { &mut *T::ptr_from_slice(mapped_slice) };

         Ok(BufferWriteGuard {
             subbuffer: self,
@@ -661,7 +678,13 @@ impl<T: ?Sized> Drop for BufferWriteGuard<'_, T> {
         };

         if allocation.atom_size().is_some() && !thread::panicking() {
-            unsafe { allocation.flush_range(self.range.clone()).unwrap() };
+            let memory_range = MappedMemoryRange {
+                offset: self.range.start,
+                size: self.range.end - self.range.start,
+                _ne: crate::NonExhaustive(()),
+            };
+
+            unsafe { allocation.flush_range_unchecked(memory_range).unwrap() };
         }

         let mut state = self.subbuffer.buffer().state();
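The user-visible effect of the reworked `Drop` impl is unchanged: dropping a write guard still flushes the written range on non-host-coherent memory, just through the new checked plumbing. A sketch, assuming a `Subbuffer<[u32]>` supplied by the caller:

    use vulkano::buffer::Subbuffer;

    fn write_one(subbuffer: Subbuffer<[u32]>) -> Result<(), Box<dyn std::error::Error>> {
        {
            let mut guard = subbuffer.write()?;
            guard[0] = 42;
        } // Guard dropped here: on non-host-coherent memory the range is flushed.
        Ok(())
    }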
@@ -777,24 +800,24 @@ impl<T: ?Sized> DerefMut for BufferWriteGuard<'_, T> {
 // - `LAYOUT` must be the correct layout for the type, which also means the type must either be
 //   sized or if it's unsized then its metadata must be the same as that of a slice. Implementing
 //   `BufferContents` for any other kind of DST is instantaneous horrifically undefined behavior.
-// - `from_ffi` must create a pointer with the same address as the `data` parameter that is passed
-//   in. The pointer is expected to be aligned properly already.
-// - `from_ffi` must create a pointer that is expected to be valid for reads (and potentially
-//   writes) for exactly `range` bytes. The `data` and `range` are expected to be valid for the
-//   `LAYOUT`.
+// - `ptr_from_slice` must create a pointer with the same address as the `slice` parameter that is
+//   passed in. The pointer is expected to be aligned properly already.
+// - `ptr_from_slice` must create a pointer that is expected to be valid for reads (and potentially
+//   writes) for exactly `slice.len()` bytes. The `slice.len()` is expected to be valid for the
+//   `LAYOUT`.
 pub unsafe trait BufferContents: Send + Sync + 'static {
     /// The layout of the contents.
     const LAYOUT: BufferContentsLayout;

-    /// Creates a pointer to `Self` from a pointer to the start of the data and a range in bytes.
+    /// Creates a pointer to `Self` from a pointer to a range of mapped memory.
     ///
     /// # Safety
     ///
-    /// - If `Self` is sized, then `range` must match the size exactly.
-    /// - If `Self` is unsized, then the `range` minus the size of the head (sized part) of the DST
-    ///   must be evenly divisible by the size of the element type.
+    /// - If `Self` is sized, then `slice.len()` must match the size exactly.
+    /// - If `Self` is unsized, then `slice.len()` minus the size of the head (sized part) of the
+    ///   DST must be evenly divisible by the size of the element type.
     #[doc(hidden)]
-    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self;
+    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self;
 }

 unsafe impl<T> BufferContents for T
@@ -809,11 +832,10 @@ where
     };

     #[inline(always)]
-    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
-        debug_assert!(range == size_of::<T>());
-        debug_assert!(data as usize % align_of::<T>() == 0);
+    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
+        debug_assert!(slice.len() == size_of::<T>());

-        data.cast()
+        <*mut [u8]>::cast::<T>(slice.as_ptr())
     }
 }

@@ -827,12 +849,12 @@ where
     });

     #[inline(always)]
-    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
-        debug_assert!(range % size_of::<T>() == 0);
-        debug_assert!(data as usize % align_of::<T>() == 0);
-        let len = range / size_of::<T>();
+    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
+        let data = <*mut [u8]>::cast::<T>(slice.as_ptr());
+        let len = slice.len() / size_of::<T>();
+        debug_assert!(slice.len() % size_of::<T>() == 0);

-        ptr::slice_from_raw_parts_mut(data.cast(), len)
+        ptr::slice_from_raw_parts_mut(data, len)
     }
 }

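As a standalone restatement of the slice impl's logic (ordinary Rust, not vulkano code), converting a raw byte-slice pointer into a typed slice pointer boils down to a cast plus recomputed length metadata:

    use std::ptr::{self, NonNull};

    /// What `<[u32] as BufferContents>::ptr_from_slice` amounts to for `T = u32`.
    unsafe fn u32s_from_bytes(slice: NonNull<[u8]>) -> *mut [u32] {
        let data = <*mut [u8]>::cast::<u32>(slice.as_ptr());
        let len = slice.len() / std::mem::size_of::<u32>();
        debug_assert!(slice.len() % std::mem::size_of::<u32>() == 0);

        ptr::slice_from_raw_parts_mut(data, len)
    }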
|
@ -229,7 +229,7 @@ pub use self::{
|
|||||||
};
|
};
|
||||||
use super::{
|
use super::{
|
||||||
DedicatedAllocation, DeviceAlignment, DeviceMemory, ExternalMemoryHandleTypes,
|
DedicatedAllocation, DeviceAlignment, DeviceMemory, ExternalMemoryHandleTypes,
|
||||||
MemoryAllocateFlags, MemoryAllocateInfo, MemoryProperties, MemoryPropertyFlags,
|
MemoryAllocateFlags, MemoryAllocateInfo, MemoryMapInfo, MemoryProperties, MemoryPropertyFlags,
|
||||||
MemoryRequirements, MemoryType,
|
MemoryRequirements, MemoryType,
|
||||||
};
|
};
|
||||||
use crate::{
|
use crate::{
|
||||||
@ -381,7 +381,7 @@ pub unsafe trait MemoryAllocator: DeviceOwned {
|
|||||||
allocation_size: DeviceSize,
|
allocation_size: DeviceSize,
|
||||||
dedicated_allocation: Option<DedicatedAllocation<'_>>,
|
dedicated_allocation: Option<DedicatedAllocation<'_>>,
|
||||||
export_handle_types: ExternalMemoryHandleTypes,
|
export_handle_types: ExternalMemoryHandleTypes,
|
||||||
) -> Result<MemoryAlloc, MemoryAllocatorError>;
|
) -> Result<MemoryAlloc, VulkanError>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Describes what memory property flags are required, preferred and not preferred when picking a
|
/// Describes what memory property flags are required, preferred and not preferred when picking a
|
||||||
@ -812,6 +812,13 @@ impl StandardMemoryAllocator {
|
|||||||
///
|
///
|
||||||
/// See also [the `MemoryAllocator` implementation].
|
/// See also [the `MemoryAllocator` implementation].
|
||||||
///
|
///
|
||||||
|
/// # Mapping behavior
|
||||||
|
///
|
||||||
|
/// Every time a new `DeviceMemory` block is allocated, it is mapped in full automatically as long
|
||||||
|
/// as it resides in host-visible memory. It remains mapped until it is dropped, which only happens
|
||||||
|
/// if the allocator is dropped. In other words, all eligible blocks are persistently mapped, so
|
||||||
|
/// you don't need to worry about whether or not your host-visible allocations are host-accessible.
|
||||||
|
///
|
||||||
/// # `DeviceMemory` allocation
|
/// # `DeviceMemory` allocation
|
||||||
///
|
///
|
||||||
/// If an allocation is created with the [`MemoryAllocatePreference::Unknown`] option, and the
|
/// If an allocation is created with the [`MemoryAllocatePreference::Unknown`] option, and the
|
||||||
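The guarantee in the new "Mapping behavior" section can be stated as a check. A minimal sketch, assuming the caller supplies the allocation and already knows whether its memory type is host-visible:

    use vulkano::memory::allocator::MemoryAlloc;

    /// Any block handed out by `GenericMemoryAllocator` from a host-visible
    /// memory type was mapped in full at allocation time and stays mapped.
    fn debug_check_persistently_mapped(alloc: &MemoryAlloc, is_host_visible: bool) {
        if is_host_visible {
            debug_assert!(alloc.device_memory().mapping_state().is_some());
        }
    }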
@@ -1176,7 +1183,7 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
                 .property_flags
                 .contains(ash::vk::MemoryPropertyFlags::LAZILY_ALLOCATED)
             {
-                return unsafe {
+                let allocation = unsafe {
                     self.allocate_dedicated_unchecked(
                         memory_type_index,
                         create_info.layout.size(),
@@ -1187,7 +1194,9 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
                             ExternalMemoryHandleTypes::empty()
                         },
                     )
-                };
+                }?;
+
+                return Ok(allocation);
             }

             unsafe { self.allocate_from_type_unchecked(memory_type_index, create_info, false) }
@@ -1305,17 +1314,16 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
         let mut i = 0;

         loop {
-            let allocate_info = MemoryAllocateInfo {
-                allocation_size: block_size >> i,
-                memory_type_index,
-                export_handle_types,
-                dedicated_allocation: None,
-                flags: self.flags,
-                ..Default::default()
-            };
-            match DeviceMemory::allocate_unchecked(self.device.clone(), allocate_info, None) {
-                Ok(device_memory) => {
-                    break S::new(MemoryAlloc::new(device_memory)?);
+            let allocation_size = block_size >> i;
+
+            match self.allocate_dedicated_unchecked(
+                memory_type_index,
+                allocation_size,
+                None,
+                export_handle_types,
+            ) {
+                Ok(allocation) => {
+                    break S::new(allocation);
                 }
                 // Retry up to 3 times, halving the allocation size each time.
                 Err(VulkanError::OutOfHostMemory | VulkanError::OutOfDeviceMemory) if i < 3 => {
@@ -1459,6 +1467,7 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
                     dedicated_allocation,
                     export_handle_types,
                 )
+                .map_err(MemoryAllocatorError::VulkanError)
             } else {
                 if size > block_size / 2 {
                     prefers_dedicated_allocation = true;
@@ -1476,6 +1485,7 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
                         dedicated_allocation,
                         export_handle_types,
                     )
+                    .map_err(MemoryAllocatorError::VulkanError)
                     // Fall back to suballocation.
                     .or_else(|err| {
                         if size <= block_size {
@@ -1504,6 +1514,7 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
                             dedicated_allocation,
                             export_handle_types,
                         )
+                        .map_err(MemoryAllocatorError::VulkanError)
                     })
                 }
             }
@@ -1515,12 +1526,14 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {

                 self.allocate_from_type_unchecked(memory_type_index, create_info.clone(), true)
             }
-            MemoryAllocatePreference::AlwaysAllocate => self.allocate_dedicated_unchecked(
-                memory_type_index,
-                size,
-                dedicated_allocation,
-                export_handle_types,
-            ),
+            MemoryAllocatePreference::AlwaysAllocate => self
+                .allocate_dedicated_unchecked(
+                    memory_type_index,
+                    size,
+                    dedicated_allocation,
+                    export_handle_types,
+                )
+                .map_err(MemoryAllocatorError::VulkanError),
         };

         match res {
@@ -1546,20 +1559,40 @@ unsafe impl<S: Suballocator> MemoryAllocator for GenericMemoryAllocator<S> {
         allocation_size: DeviceSize,
         dedicated_allocation: Option<DedicatedAllocation<'_>>,
         export_handle_types: ExternalMemoryHandleTypes,
-    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
-        let allocate_info = MemoryAllocateInfo {
-            allocation_size,
-            memory_type_index,
-            dedicated_allocation,
-            export_handle_types,
-            flags: self.flags,
-            ..Default::default()
-        };
-        let mut allocation = MemoryAlloc::new(DeviceMemory::allocate_unchecked(
-            self.device.clone(),
-            allocate_info,
-            None,
-        )?)?;
-
+    ) -> Result<MemoryAlloc, VulkanError> {
+        // SAFETY: The caller must uphold the safety contract.
+        let mut memory = DeviceMemory::allocate_unchecked(
+            self.device.clone(),
+            MemoryAllocateInfo {
+                allocation_size,
+                memory_type_index,
+                dedicated_allocation,
+                export_handle_types,
+                flags: self.flags,
+                ..Default::default()
+            },
+            None,
+        )?;
+
+        if self.pools[memory_type_index as usize]
+            .memory_type
+            .property_flags
+            .intersects(ash::vk::MemoryPropertyFlags::HOST_VISIBLE)
+        {
+            // SAFETY:
+            // - We checked that the memory is host-visible.
+            // - The memory can't be mapped already, because we just allocated it.
+            // - Mapping the whole range is always valid.
+            memory.map_unchecked(MemoryMapInfo {
+                offset: 0,
+                size: memory.allocation_size(),
+                _ne: crate::NonExhaustive(()),
+            })?;
+        }
+
+        let mut allocation = MemoryAlloc::new(memory);
+
+        // SAFETY: The memory is freshly allocated.
         allocation.set_allocation_type(self.allocation_type);

         Ok(allocation)
@@ -1628,7 +1661,7 @@ unsafe impl<T: MemoryAllocator> MemoryAllocator for Arc<T> {
         allocation_size: DeviceSize,
         dedicated_allocation: Option<DedicatedAllocation<'_>>,
         export_handle_types: ExternalMemoryHandleTypes,
-    ) -> Result<MemoryAlloc, MemoryAllocatorError> {
+    ) -> Result<MemoryAlloc, VulkanError> {
         (**self).allocate_dedicated_unchecked(
             memory_type_index,
             allocation_size,
|
@ -14,14 +14,13 @@
|
|||||||
//! [the parent module]: super
|
//! [the parent module]: super
|
||||||
|
|
||||||
use self::host::SlotId;
|
use self::host::SlotId;
|
||||||
use super::{
|
use super::{align_down, align_up, array_vec::ArrayVec, DeviceAlignment, DeviceLayout};
|
||||||
align_down, align_up, array_vec::ArrayVec, DeviceAlignment, DeviceLayout, MemoryAllocatorError,
|
|
||||||
};
|
|
||||||
use crate::{
|
use crate::{
|
||||||
device::{Device, DeviceOwned},
|
device::{Device, DeviceOwned},
|
||||||
image::ImageTiling,
|
image::ImageTiling,
|
||||||
memory::{is_aligned, DeviceMemory, MemoryPropertyFlags},
|
memory::{self, is_aligned, DeviceMemory, MappedMemoryRange, MemoryPropertyFlags},
|
||||||
DeviceSize, NonZeroDeviceSize, VulkanError, VulkanObject,
|
sync::HostAccessError,
|
||||||
|
DeviceSize, NonZeroDeviceSize, Validated, ValidationError, VulkanError,
|
||||||
};
|
};
|
||||||
use crossbeam_queue::ArrayQueue;
|
use crossbeam_queue::ArrayQueue;
|
||||||
use parking_lot::Mutex;
|
use parking_lot::Mutex;
|
||||||
@ -29,12 +28,10 @@ use std::{
|
|||||||
cell::Cell,
|
cell::Cell,
|
||||||
cmp,
|
cmp,
|
||||||
error::Error,
|
error::Error,
|
||||||
ffi::c_void,
|
|
||||||
fmt::{self, Display},
|
fmt::{self, Display},
|
||||||
mem::{self, ManuallyDrop, MaybeUninit},
|
mem::{self, ManuallyDrop},
|
||||||
ops::Range,
|
ops::RangeBounds,
|
||||||
ptr::{self, NonNull},
|
ptr::{self, NonNull},
|
||||||
slice,
|
|
||||||
sync::{
|
sync::{
|
||||||
atomic::{AtomicU64, Ordering},
|
atomic::{AtomicU64, Ordering},
|
||||||
Arc,
|
Arc,
|
||||||
@ -57,8 +54,6 @@ pub struct MemoryAlloc {
|
|||||||
size: DeviceSize,
|
size: DeviceSize,
|
||||||
// Needed when binding resources to the allocation in order to avoid aliasing memory.
|
// Needed when binding resources to the allocation in order to avoid aliasing memory.
|
||||||
allocation_type: AllocationType,
|
allocation_type: AllocationType,
|
||||||
// Mapped pointer to the start of the allocation or `None` is the memory is not host-visible.
|
|
||||||
mapped_ptr: Option<NonNull<c_void>>,
|
|
||||||
// Used by the suballocators to align allocations to the non-coherent atom size when the memory
|
// Used by the suballocators to align allocations to the non-coherent atom size when the memory
|
||||||
// type is host-visible but not host-coherent. This will be `None` for any other memory type.
|
// type is host-visible but not host-coherent. This will be `None` for any other memory type.
|
||||||
atom_size: Option<DeviceAlignment>,
|
atom_size: Option<DeviceAlignment>,
|
||||||
@ -86,17 +81,10 @@ enum AllocParent {
|
|||||||
Dedicated(DeviceMemory),
|
Dedicated(DeviceMemory),
|
||||||
}
|
}
|
||||||
|
|
||||||
// It is safe to share `mapped_ptr` between threads because the user would have to use unsafe code
|
|
||||||
// themself to get UB in the first place.
|
|
||||||
unsafe impl Send for MemoryAlloc {}
|
|
||||||
unsafe impl Sync for MemoryAlloc {}
|
|
||||||
|
|
||||||
impl MemoryAlloc {
|
impl MemoryAlloc {
|
||||||
/// Creates a new `MemoryAlloc`.
|
/// Creates a new `MemoryAlloc`.
|
||||||
///
|
|
||||||
/// The memory is mapped automatically if it's host-visible.
|
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn new(device_memory: DeviceMemory) -> Result<Self, MemoryAllocatorError> {
|
pub fn new(device_memory: DeviceMemory) -> Self {
|
||||||
// Sanity check: this would lead to UB when suballocating.
|
// Sanity check: this would lead to UB when suballocating.
|
||||||
assert!(device_memory.allocation_size() <= DeviceLayout::MAX_SIZE);
|
assert!(device_memory.allocation_size() <= DeviceLayout::MAX_SIZE);
|
||||||
|
|
||||||
@@ -107,47 +95,21 @@ impl MemoryAlloc {
             [memory_type_index as usize]
             .property_flags;

-        let mapped_ptr = if property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE) {
-            // Sanity check: this would lead to UB when calculating pointer offsets.
-            assert!(device_memory.allocation_size() <= isize::MAX.try_into().unwrap());
-
-            let fns = device.fns();
-            let mut output = MaybeUninit::uninit();
-            // This is always valid because we are mapping the whole range.
-            unsafe {
-                (fns.v1_0.map_memory)(
-                    device.handle(),
-                    device_memory.handle(),
-                    0,
-                    ash::vk::WHOLE_SIZE,
-                    ash::vk::MemoryMapFlags::empty(),
-                    output.as_mut_ptr(),
-                )
-                .result()
-                .map_err(VulkanError::from)?;
-
-                Some(NonNull::new(output.assume_init()).unwrap())
-            }
-        } else {
-            None
-        };
-
         let atom_size = (property_flags.intersects(MemoryPropertyFlags::HOST_VISIBLE)
             && !property_flags.intersects(MemoryPropertyFlags::HOST_COHERENT))
         .then_some(physical_device.properties().non_coherent_atom_size);

-        Ok(MemoryAlloc {
+        MemoryAlloc {
             offset: 0,
             size: device_memory.allocation_size(),
             allocation_type: AllocationType::Unknown,
-            mapped_ptr,
             atom_size,
             parent: if device_memory.is_dedicated() {
                 AllocParent::Dedicated(device_memory)
             } else {
                 AllocParent::Root(Arc::new(device_memory))
             },
-        })
+        }
     }

     /// Returns the offset of the allocation within the [`DeviceMemory`] block.
@@ -168,37 +130,31 @@ impl MemoryAlloc {
         self.allocation_type
     }

-    /// Returns the mapped pointer to the start of the allocation if the memory is host-visible,
-    /// otherwise returns [`None`].
-    #[inline]
-    pub fn mapped_ptr(&self) -> Option<NonNull<c_void>> {
-        self.mapped_ptr
-    }
-
-    /// Returns a mapped slice to the data within the allocation if the memory is host-visible,
-    /// otherwise returns [`None`].
-    ///
-    /// # Safety
-    ///
-    /// - While the returned slice exists, there must be no operations pending or executing in a
-    ///   GPU queue that write to the same memory.
-    #[inline]
-    pub unsafe fn mapped_slice(&self) -> Option<&[u8]> {
-        self.mapped_ptr
-            .map(|ptr| slice::from_raw_parts(ptr.as_ptr().cast(), self.size as usize))
-    }
-
-    /// Returns a mapped mutable slice to the data within the allocation if the memory is
-    /// host-visible, otherwise returns [`None`].
-    ///
-    /// # Safety
-    ///
-    /// - While the returned slice exists, there must be no operations pending or executing in a
-    ///   GPU queue that access the same memory.
-    #[inline]
-    pub unsafe fn mapped_slice_mut(&mut self) -> Option<&mut [u8]> {
-        self.mapped_ptr
-            .map(|ptr| slice::from_raw_parts_mut(ptr.as_ptr().cast(), self.size as usize))
-    }
+    /// Returns the mapped pointer to a range of the allocation, or returns [`None`] if out of
+    /// bounds.
+    ///
+    /// `range` is specified in bytes relative to the beginning of `self` and must fall within the
+    /// range of the memory mapping given to [`DeviceMemory::map`].
+    ///
+    /// See [`MappingState::slice`] for the safety invariants of the returned pointer.
+    ///
+    /// [`MappingState::slice`]: crate::memory::MappingState::slice
+    #[inline]
+    pub fn mapped_slice(
+        &self,
+        range: impl RangeBounds<DeviceSize>,
+    ) -> Option<Result<NonNull<[u8]>, HostAccessError>> {
+        let mut range = memory::range(range, ..self.size())?;
+        range.start += self.offset();
+        range.end += self.offset();
+
+        let res = if let Some(state) = self.device_memory().mapping_state() {
+            state.slice(range).ok_or(HostAccessError::OutOfMappedRange)
+        } else {
+            Err(HostAccessError::NotHostMapped)
+        };

+        Some(res)
+    }

     pub(crate) fn atom_size(&self) -> Option<DeviceAlignment> {
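The nested return type introduced above is worth spelling out: the outer `Option` reports a `range` that is out of bounds of the allocation, while the inner `Result` reports mapping problems. A sketch, assuming an allocation at least 64 bytes large:

    use vulkano::{memory::allocator::MemoryAlloc, sync::HostAccessError};

    fn classify(alloc: &MemoryAlloc) {
        match alloc.mapped_slice(0..64) {
            None => { /* 0..64 is out of bounds of this allocation */ }
            Some(Err(HostAccessError::NotHostMapped)) => { /* memory was never mapped */ }
            Some(Err(HostAccessError::OutOfMappedRange)) => { /* mapped, but not this range */ }
            Some(Err(_)) => { /* other host-access failures */ }
            Some(Ok(_ptr)) => { /* NonNull<[u8]> covering the requested range */ }
        }
    }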
@@ -207,142 +163,122 @@ impl MemoryAlloc {

     /// Invalidates the host (CPU) cache for a range of the allocation.
     ///
-    /// You must call this method before the memory is read by the host, if the device previously
-    /// wrote to the memory. It has no effect if the memory is not mapped or if the memory is
-    /// [host-coherent].
-    ///
-    /// `range` is specified in bytes relative to the start of the allocation. The start and end of
-    /// `range` must be a multiple of the [`non_coherent_atom_size`] device property, but
-    /// `range.end` can also equal to `self.size()`.
+    /// If the device memory is not [host-coherent], you must call this function before the memory
+    /// is read by the host, if the device previously wrote to the memory. It has no effect if the
+    /// memory is host-coherent.
     ///
     /// # Safety
     ///
-    /// - If there are memory writes by the GPU that have not been propagated into the CPU cache,
-    ///   then there must not be any references in Rust code to the specified `range` of the memory.
-    ///
-    /// # Panics
-    ///
-    /// - Panics if `range` is empty.
-    /// - Panics if `range.end` exceeds `self.size`.
-    /// - Panics if `range.start` or `range.end` are not a multiple of the `non_coherent_atom_size`.
+    /// - If there are memory writes by the device that have not been propagated into the host
+    ///   cache, then there must not be any references in Rust code to any portion of the specified
+    ///   `memory_range`.
     ///
     /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
     /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
     #[inline]
-    pub unsafe fn invalidate_range(&self, range: Range<DeviceSize>) -> Result<(), VulkanError> {
-        // VUID-VkMappedMemoryRange-memory-00684
-        if let Some(atom_size) = self.atom_size {
-            let range = self.create_memory_range(range, atom_size);
-            let device = self.device();
-            let fns = device.fns();
-            (fns.v1_0.invalidate_mapped_memory_ranges)(device.handle(), 1, &range)
-                .result()
-                .map_err(VulkanError::from)?;
-        } else {
-            self.debug_validate_memory_range(&range);
-        }
-
-        Ok(())
-    }
-
-    /// Flushes the host (CPU) cache for a range of the allocation.
-    ///
-    /// You must call this method after writing to the memory from the host, if the device is going
-    /// to read the memory. It has no effect if the memory is not mapped or if the memory is
-    /// [host-coherent].
-    ///
-    /// `range` is specified in bytes relative to the start of the allocation. The start and end of
-    /// `range` must be a multiple of the [`non_coherent_atom_size`] device property, but
-    /// `range.end` can also equal to `self.size()`.
-    ///
-    /// # Safety
-    ///
-    /// - There must be no operations pending or executing in a GPU queue that access the specified
-    ///   `range` of the memory.
-    ///
-    /// # Panics
-    ///
-    /// - Panics if `range` is empty.
-    /// - Panics if `range.end` exceeds `self.size`.
-    /// - Panics if `range.start` or `range.end` are not a multiple of the `non_coherent_atom_size`.
-    ///
-    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
-    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
-    #[inline]
-    pub unsafe fn flush_range(&self, range: Range<DeviceSize>) -> Result<(), VulkanError> {
-        // VUID-VkMappedMemoryRange-memory-00684
-        if let Some(atom_size) = self.atom_size {
-            let range = self.create_memory_range(range, atom_size);
-            let device = self.device();
-            let fns = device.fns();
-            (fns.v1_0.flush_mapped_memory_ranges)(device.handle(), 1, &range)
-                .result()
-                .map_err(VulkanError::from)?;
-        } else {
-            self.debug_validate_memory_range(&range);
-        }
-
-        Ok(())
-    }
-
-    fn create_memory_range(
-        &self,
-        range: Range<DeviceSize>,
-        atom_size: DeviceAlignment,
-    ) -> ash::vk::MappedMemoryRange {
-        assert!(!range.is_empty() && range.end <= self.size);
-
-        // VUID-VkMappedMemoryRange-size-00685
-        // Guaranteed because we always map the entire `DeviceMemory`.
-
-        // VUID-VkMappedMemoryRange-offset-00687
-        // VUID-VkMappedMemoryRange-size-01390
-        assert!(
-            is_aligned(range.start, atom_size)
-                && (is_aligned(range.end, atom_size) || range.end == self.size)
-        );
-
-        // VUID-VkMappedMemoryRange-offset-00687
-        // Guaranteed as long as `range.start` is aligned because the suballocators always align
-        // `self.offset` to the non-coherent atom size for non-coherent host-visible memory.
-        let offset = self.offset + range.start;
-
-        let mut size = range.end - range.start;
-        let device_memory = self.device_memory();
-
-        // VUID-VkMappedMemoryRange-size-01390
-        if offset + size < device_memory.allocation_size() {
-            // We align the size in case `range.end == self.size`. We can do this without aliasing
-            // other allocations because the suballocators ensure that all allocations are aligned
-            // to the atom size for non-coherent host-visible memory.
-            size = align_up(size, atom_size);
-        }
-
-        ash::vk::MappedMemoryRange {
-            memory: device_memory.handle(),
-            offset,
-            size,
-            ..Default::default()
-        }
-    }
-
-    /// This exists because even if no cache control is required, the parameters should still be
-    /// valid, otherwise you might have bugs in your code forever just because your memory happens
-    /// to be host-coherent.
-    fn debug_validate_memory_range(&self, range: &Range<DeviceSize>) {
-        debug_assert!(!range.is_empty() && range.end <= self.size);
-
-        let atom_size = self
-            .device()
-            .physical_device()
-            .properties()
-            .non_coherent_atom_size;
-        debug_assert!(
-            is_aligned(range.start, atom_size)
-                && (is_aligned(range.end, atom_size) || range.end == self.size),
-            "attempted to invalidate or flush a memory range that is not aligned to the \
-            non-coherent atom size",
-        );
+    pub unsafe fn invalidate_range(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), Validated<VulkanError>> {
+        self.validate_memory_range(&memory_range)?;
+
+        self.device_memory()
+            .invalidate_range(self.create_memory_range(memory_range))
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn invalidate_range_unchecked(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), VulkanError> {
+        self.device_memory()
+            .invalidate_range_unchecked(self.create_memory_range(memory_range))
+    }
+
+    /// Flushes the host cache for a range of the allocation.
+    ///
+    /// If the device memory is not [host-coherent], you must call this function after writing to
+    /// the memory, if the device is going to read the memory. It has no effect if the memory is
+    /// host-coherent.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no operations pending or executing in a device queue, that access the
+    ///   specified `memory_range`.
+    ///
+    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    #[inline]
+    pub unsafe fn flush_range(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), Validated<VulkanError>> {
+        self.validate_memory_range(&memory_range)?;
+
+        self.device_memory()
+            .flush_range(self.create_memory_range(memory_range))
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn flush_range_unchecked(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), VulkanError> {
+        self.device_memory()
+            .flush_range_unchecked(self.create_memory_range(memory_range))
+    }
+
+    fn validate_memory_range(
+        &self,
+        memory_range: &MappedMemoryRange,
+    ) -> Result<(), Box<ValidationError>> {
+        let &MappedMemoryRange {
+            offset,
+            size,
+            _ne: _,
+        } = memory_range;
+
+        if !(offset <= self.size() && size <= self.size() - offset) {
+            return Err(Box::new(ValidationError {
+                context: "memory_range".into(),
+                problem: "is not contained within the allocation".into(),
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
+    }
+
+    fn create_memory_range(&self, memory_range: MappedMemoryRange) -> MappedMemoryRange {
+        let MappedMemoryRange {
+            mut offset,
+            mut size,
+            _ne: _,
+        } = memory_range;
+
+        let memory = self.device_memory();
+
+        offset += self.offset();
+
+        // VUID-VkMappedMemoryRange-size-01390
+        if memory_range.offset + size == self.size() {
+            // We can align the end of the range like this without aliasing other allocations,
+            // because the suballocators ensure that all allocations are aligned to the atom size
+            // for non-host-coherent host-visible memory.
+            let end = cmp::min(
+                align_up(offset + size, memory.atom_size()),
+                memory.allocation_size(),
+            );
+            size = end - offset;
+        }
+
+        MappedMemoryRange {
+            offset,
+            size,
+            _ne: crate::NonExhaustive(()),
+        }
     }

     /// Returns the underlying block of [`DeviceMemory`].
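The checked variants above validate the range against the allocation and then defer to `DeviceMemory`, so the host-cache discipline for non-coherent memory now reads as follows. A sketch, assuming a 64-byte range and `Default`-based construction of `MappedMemoryRange` (both are assumptions, not taken from the diff):

    use vulkano::{
        memory::{allocator::MemoryAlloc, MappedMemoryRange},
        Validated, VulkanError,
    };

    unsafe fn host_read_then_write(alloc: &MemoryAlloc) -> Result<(), Validated<VulkanError>> {
        let range = || MappedMemoryRange { offset: 0, size: 64, ..Default::default() };

        // Before the host reads device writes: invalidate.
        alloc.invalidate_range(range())?;
        // ... host reads, then writes ...
        // After host writes, before the device reads: flush.
        alloc.flush_range(range())?;
        Ok(())
    }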
@@ -490,12 +426,6 @@ impl MemoryAlloc {
     /// [`shift`]: Self::shift
     #[inline]
     pub unsafe fn set_offset(&mut self, new_offset: DeviceSize) {
-        if let Some(ptr) = self.mapped_ptr.as_mut() {
-            *ptr = NonNull::new_unchecked(
-                ptr.as_ptr()
-                    .offset(new_offset as isize - self.offset as isize),
-            );
-        }
         self.offset = new_offset;
     }

@@ -642,8 +572,7 @@ unsafe impl DeviceOwned for MemoryAlloc {
 ///         },
 ///     )
 ///     .unwrap(),
-/// )
-/// .unwrap();
+/// );
 ///
 /// // You can now feed `region` into any suballocator.
 /// ```
@@ -1159,26 +1088,10 @@ unsafe impl Suballocator for Arc<FreeListAllocator> {
                     // constrained by the remaining size of the region.
                     self.free_size.fetch_sub(size, Ordering::Release);

-                    let mapped_ptr = self.region.mapped_ptr.map(|ptr| {
-                        // This can't overflow because offsets in the free-list are confined
-                        // to the range [region.offset, region.offset + region.size).
-                        let relative_offset = offset - self.region.offset;
-
-                        // SAFETY: Allocation sizes are guaranteed to not exceed
-                        // `isize::MAX` when they have a mapped pointer, and the original
-                        // pointer was handed to us from the Vulkan implementation,
-                        // so the offset better be in range.
-                        let ptr = ptr.as_ptr().offset(relative_offset as isize);
-
-                        // SAFETY: Same as the previous.
-                        NonNull::new_unchecked(ptr)
-                    });
-
                     return Ok(MemoryAlloc {
                         offset,
                         size,
                         allocation_type,
-                        mapped_ptr,
                         atom_size: self.region.atom_size,
                         parent: AllocParent::FreeList {
                             allocator: self.clone(),
@@ -1773,25 +1686,10 @@ unsafe impl Suballocator for Arc<BuddyAllocator> {
                         // constrained by the remaining size of the region.
                         self.free_size.fetch_sub(size, Ordering::Release);

-                        let mapped_ptr = self.region.mapped_ptr.map(|ptr| {
-                            // This can't overflow because offsets in the free-list are confined to the
-                            // range [region.offset, region.offset + region.size).
-                            let relative_offset = offset - self.region.offset;
-
-                            // SAFETY: Allocation sizes are guaranteed to not exceed `isize::MAX` when
-                            // they have a mapped pointer, and the original pointer was handed to us
-                            // from the Vulkan implementation, so the offset better be in range.
-                            let ptr = unsafe { ptr.as_ptr().offset(relative_offset as isize) };
-
-                            // SAFETY: Same as the previous.
-                            unsafe { NonNull::new_unchecked(ptr) }
-                        });
-
                         return Ok(MemoryAlloc {
                             offset,
                             size: layout.size(),
                             allocation_type,
-                            mapped_ptr,
                             atom_size: self.region.atom_size,
                             parent: AllocParent::Buddy {
                                 allocator: self.clone(),
@@ -2176,21 +2074,10 @@ impl PoolAllocatorInner {
             };
         }

-        let mapped_ptr = self.region.mapped_ptr.map(|ptr| {
-            // SAFETY: Allocation sizes are guaranteed to not exceed `isize::MAX` when they have a
-            // mapped pointer, and the original pointer was handed to us from the Vulkan
-            // implementation, so the offset better be in range.
-            let ptr = unsafe { ptr.as_ptr().offset(relative_offset as isize) };
-
-            // SAFETY: Same as the previous.
-            unsafe { NonNull::new_unchecked(ptr) }
-        });
-
         Ok(MemoryAlloc {
             offset,
             size,
             allocation_type: self.region.allocation_type,
-            mapped_ptr,
             atom_size: self.region.atom_size,
             parent: AllocParent::Pool {
                 allocator: self,
@@ -2453,21 +2340,10 @@ unsafe impl Suballocator for Arc<BumpAllocator> {
                 Ordering::Relaxed,
             ) {
                 Ok(_) => {
-                    let mapped_ptr = self.region.mapped_ptr.map(|ptr| {
-                        // SAFETY: Allocation sizes are guaranteed to not exceed `isize::MAX` when
-                        // they have a mapped pointer, and the original pointer was handed to us
-                        // from the Vulkan implementation, so the offset better be in range.
-                        let ptr = unsafe { ptr.as_ptr().offset(relative_offset as isize) };
-
-                        // SAFETY: Same as the previous.
-                        unsafe { NonNull::new_unchecked(ptr) }
-                    });
-
                     return Ok(MemoryAlloc {
                         offset,
                         size,
                         allocation_type,
-                        mapped_ptr,
                         atom_size: self.region.atom_size,
                         parent: AllocParent::Bump(self.clone()),
                     });
@@ -2644,44 +2520,6 @@ mod tests {
     use crate::memory::MemoryAllocateInfo;
     use std::thread;

-    #[test]
-    fn memory_alloc_set_offset() {
-        let (device, _) = gfx_dev_and_queue!();
-        let memory_type_index = device
-            .physical_device()
-            .memory_properties()
-            .memory_types
-            .iter()
-            .position(|memory_type| {
-                memory_type
-                    .property_flags
-                    .contains(MemoryPropertyFlags::HOST_VISIBLE)
-            })
-            .unwrap() as u32;
-        let mut alloc = MemoryAlloc::new(
-            DeviceMemory::allocate(
-                device,
-                MemoryAllocateInfo {
-                    memory_type_index,
-                    allocation_size: 1024,
-                    ..Default::default()
-                },
-            )
-            .unwrap(),
-        )
-        .unwrap();
-        let ptr = alloc.mapped_ptr().unwrap().as_ptr();
-
-        unsafe {
-            alloc.set_offset(16);
-            assert_eq!(alloc.mapped_ptr().unwrap().as_ptr(), ptr.offset(16));
-            alloc.set_offset(0);
-            assert_eq!(alloc.mapped_ptr().unwrap().as_ptr(), ptr.offset(0));
-            alloc.set_offset(32);
-            assert_eq!(alloc.mapped_ptr().unwrap().as_ptr(), ptr.offset(32));
-        }
-    }
-
     #[test]
     fn free_list_allocator_capacity() {
         const THREADS: DeviceSize = 12;
@@ -2782,7 +2620,7 @@ mod tests {
         .unwrap();

         PoolAllocator::new(
-            MemoryAlloc::new(device_memory).unwrap(),
+            MemoryAlloc::new(device_memory),
             DeviceAlignment::new(1).unwrap(),
         )
     }
@@ -2837,7 +2675,7 @@ mod tests {
         .unwrap();

         PoolAllocator::<BLOCK_SIZE>::new(
-            MemoryAlloc::new(device_memory).unwrap(),
+            MemoryAlloc::new(device_memory),
             DeviceAlignment::new(1).unwrap(),
         )
     };
@@ -2872,7 +2710,7 @@ mod tests {
             },
         )
         .unwrap();
-        let mut region = MemoryAlloc::new(device_memory).unwrap();
+        let mut region = MemoryAlloc::new(device_memory);
         unsafe { region.set_allocation_type(allocation_type) };

         PoolAllocator::new(region, DeviceAlignment::new(256).unwrap())
@@ -3108,7 +2946,7 @@ mod tests {
             },
         )
         .unwrap();
-        let mut allocator = <$type>::new(MemoryAlloc::new(device_memory).unwrap());
+        let mut allocator = <$type>::new(MemoryAlloc::new(device_memory));
         Arc::get_mut(&mut allocator)
             .unwrap()
             .buffer_image_granularity = DeviceAlignment::new($granularity).unwrap();
@@ -22,7 +22,8 @@ use std::{
     mem::MaybeUninit,
     num::NonZeroU64,
     ops::Range,
-    ptr, slice,
+    ptr::{self, NonNull},
+    slice,
     sync::{atomic::Ordering, Arc},
 };

@@ -61,6 +62,10 @@ pub struct DeviceMemory {
     export_handle_types: ExternalMemoryHandleTypes,
     imported_handle_type: Option<ExternalMemoryHandleType>,
     flags: MemoryAllocateFlags,
+
+    mapping_state: Option<MappingState>,
+    atom_size: DeviceAlignment,
+    is_coherent: bool,
 }

 impl DeviceMemory {
@@ -71,7 +76,6 @@ impl DeviceMemory {
     ///
     /// # Panics
     ///
-    /// - Panics if `allocate_info.allocation_size` is 0.
     /// - Panics if `allocate_info.dedicated_allocation` is `Some` and the contained buffer or
     ///   image does not belong to `device`.
     #[inline]
@@ -82,7 +86,7 @@ impl DeviceMemory {
         if !(device.api_version() >= Version::V1_1
             || device.enabled_extensions().khr_dedicated_allocation)
         {
-            // Fall back instead of erroring out
+            // Fall back instead of erroring out.
             allocate_info.dedicated_allocation = None;
         }

@@ -99,7 +103,6 @@ impl DeviceMemory {
     ///
     /// # Panics
     ///
-    /// - Panics if `allocate_info.allocation_size` is 0.
     /// - Panics if `allocate_info.dedicated_allocation` is `Some` and the contained buffer or
     ///   image does not belong to `device`.
     #[inline]
@@ -111,7 +114,7 @@ impl DeviceMemory {
         if !(device.api_version() >= Version::V1_1
             || device.enabled_extensions().khr_dedicated_allocation)
         {
-            // Fall back instead of erroring out
+            // Fall back instead of erroring out.
             allocate_info.dedicated_allocation = None;
         }

@@ -281,16 +284,28 @@ impl DeviceMemory {
             output.assume_init()
         };

+        let atom_size = device.physical_device().properties().non_coherent_atom_size;
+
+        let is_coherent = device.physical_device().memory_properties().memory_types
+            [memory_type_index as usize]
+            .property_flags
+            .intersects(MemoryPropertyFlags::HOST_COHERENT);
+
         Ok(DeviceMemory {
             handle,
             device: InstanceOwnedDebugWrapper(device),
             id: Self::next_id(),
             allocation_size,
             memory_type_index,
             dedicated_to: dedicated_allocation.map(Into::into),
             export_handle_types,
             imported_handle_type,
             flags,
+
+            mapping_state: None,
+            atom_size,
+            is_coherent,
         })
     }

@@ -315,16 +330,28 @@ impl DeviceMemory {
             _ne: _,
         } = allocate_info;

+        let atom_size = device.physical_device().properties().non_coherent_atom_size;
+
+        let is_coherent = device.physical_device().memory_properties().memory_types
+            [memory_type_index as usize]
+            .property_flags
+            .intersects(MemoryPropertyFlags::HOST_COHERENT);
+
         DeviceMemory {
             handle,
             device: InstanceOwnedDebugWrapper(device),
             id: Self::next_id(),
             allocation_size,
             memory_type_index,
             dedicated_to: dedicated_allocation.map(Into::into),
             export_handle_types,
             imported_handle_type: None,
             flags,
+
+            mapping_state: None,
+            atom_size,
+            is_coherent,
         }
     }

@ -370,6 +397,305 @@ impl DeviceMemory {
|
|||||||
self.flags
|
self.flags
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns the current mapping state, or [`None`] if the memory is not currently host-mapped.
|
||||||
|
#[inline]
|
||||||
|
pub fn mapping_state(&self) -> Option<&MappingState> {
|
||||||
|
self.mapping_state.as_ref()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn atom_size(&self) -> DeviceAlignment {
|
||||||
|
self.atom_size
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Maps a range of memory to be accessed by the host.
|
||||||
|
///
|
||||||
|
/// `self` must not be host-mapped already and must be allocated from host-visible memory.
|
||||||
|
#[inline]
|
||||||
|
pub fn map(&mut self, map_info: MemoryMapInfo) -> Result<(), Validated<VulkanError>> {
|
||||||
|
self.validate_map(&map_info)?;
|
||||||
|
|
||||||
|
unsafe { Ok(self.map_unchecked(map_info)?) }
|
||||||
|
}
|
||||||
|
|
||||||
|
fn validate_map(&self, map_info: &MemoryMapInfo) -> Result<(), Box<ValidationError>> {
|
||||||
|
if self.mapping_state.is_some() {
|
||||||
|
return Err(Box::new(ValidationError {
|
||||||
|
problem: "this device memory is already host-mapped".into(),
|
||||||
|
vuids: &["VUID-vkMapMemory-memory-00678"],
|
||||||
|
..Default::default()
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
map_info
|
||||||
|
.validate(self)
|
||||||
|
.map_err(|err| err.add_context("map_info"))?;
|
||||||
|
|
||||||
|
let memory_type = &self
|
||||||
|
.device()
|
||||||
|
.physical_device()
|
||||||
|
.memory_properties()
|
||||||
|
.memory_types[self.memory_type_index() as usize];
|
||||||
|
|
||||||
|
if !memory_type
|
||||||
|
.property_flags
|
||||||
|
.intersects(MemoryPropertyFlags::HOST_VISIBLE)
|
||||||
|
{
|
||||||
|
return Err(Box::new(ValidationError {
|
||||||
|
problem: "`self.memory_type_index()` refers to a memory type whose \
|
||||||
|
`property_flags` does not contain `MemoryPropertyFlags::HOST_VISIBLE`"
|
||||||
|
.into(),
|
||||||
|
vuids: &["VUID-vkMapMemory-memory-00682"],
|
||||||
|
..Default::default()
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
|
||||||
|
pub unsafe fn map_unchecked(&mut self, map_info: MemoryMapInfo) -> Result<(), VulkanError> {
|
||||||
|
let MemoryMapInfo {
|
||||||
|
offset,
|
||||||
|
size,
|
||||||
|
_ne: _,
|
||||||
|
} = map_info;
|
||||||
|
|
||||||
|
// Sanity check: this would lead to UB when calculating pointer offsets.
|
||||||
|
assert!(size <= isize::MAX.try_into().unwrap());
|
||||||
|
|
||||||
|
let device = self.device();
|
||||||
|
|
||||||
|
let ptr = {
|
||||||
|
let fns = device.fns();
|
||||||
|
let mut output = MaybeUninit::uninit();
|
||||||
|
|
||||||
|
if device.enabled_extensions().khr_map_memory2 {
|
||||||
|
let map_info_vk = ash::vk::MemoryMapInfoKHR {
|
||||||
|
flags: ash::vk::MemoryMapFlags::empty(),
|
||||||
|
memory: self.handle(),
|
||||||
|
offset,
|
||||||
|
size,
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
(fns.khr_map_memory2.map_memory2_khr)(
|
||||||
|
device.handle(),
|
||||||
|
&map_info_vk,
|
||||||
|
output.as_mut_ptr(),
|
||||||
|
)
|
||||||
|
.result()
|
||||||
|
.map_err(VulkanError::from)?;
|
||||||
|
} else {
|
||||||
|
(fns.v1_0.map_memory)(
|
||||||
|
device.handle(),
|
||||||
|
self.handle,
|
||||||
|
offset,
|
||||||
|
size,
|
||||||
|
ash::vk::MemoryMapFlags::empty(),
|
||||||
|
output.as_mut_ptr(),
|
||||||
|
)
|
||||||
|
.result()
|
||||||
|
.map_err(VulkanError::from)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
output.assume_init()
|
||||||
|
};
|
||||||
|
|
||||||
|
let ptr = NonNull::new(ptr).unwrap();
|
||||||
|
let range = offset..offset + size;
|
||||||
|
self.mapping_state = Some(MappingState { ptr, range });
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unmaps the memory. It will no longer be accessible from the host.
|
||||||
|
///
|
||||||
|
/// `self` must be currently host-mapped.
|
||||||
|
//
|
||||||
|
// NOTE(Marc): The `&mut` here is more than just because we need to mutate the struct.
|
||||||
|
// `vkMapMemory` and `vkUnmapMemory` must be externally synchronized, but more importantly, if
|
||||||
|
// we allowed unmapping through a shared reference, it would be possible to unmap a resource
|
||||||
|
// that's currently being read or written by the host elsewhere, requiring even more locking on
|
||||||
|
// each host access.
|
||||||
|
#[inline]
|
||||||
|
pub fn unmap(&mut self, unmap_info: MemoryUnmapInfo) -> Result<(), Validated<VulkanError>> {
|
||||||
|
self.validate_unmap(&unmap_info)?;
|
||||||
|
|
||||||
|
unsafe { self.unmap_unchecked(unmap_info) }?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn validate_unmap(&self, unmap_info: &MemoryUnmapInfo) -> Result<(), Box<ValidationError>> {
|
||||||
|
if self.mapping_state.is_none() {
|
||||||
|
return Err(Box::new(ValidationError {
|
||||||
|
problem: "this device memory is not currently host-mapped".into(),
|
||||||
|
vuids: &["VUID-vkUnmapMemory-memory-00689"],
|
||||||
|
..Default::default()
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
unmap_info
|
||||||
|
.validate(self)
|
||||||
|
.map_err(|err| err.add_context("unmap_info"))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
|
||||||
|
pub unsafe fn unmap_unchecked(
|
||||||
|
&mut self,
|
||||||
|
unmap_info: MemoryUnmapInfo,
|
||||||
|
) -> Result<(), VulkanError> {
|
||||||
|
let MemoryUnmapInfo { _ne: _ } = unmap_info;
|
||||||
|
|
||||||
|
let device = self.device();
|
||||||
|
let fns = device.fns();
|
||||||
|
|
||||||
|
if device.enabled_extensions().khr_map_memory2 {
|
||||||
|
let unmap_info_vk = ash::vk::MemoryUnmapInfoKHR {
|
||||||
|
flags: ash::vk::MemoryUnmapFlagsKHR::empty(),
|
||||||
|
memory: self.handle(),
|
||||||
|
..Default::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
(fns.khr_map_memory2.unmap_memory2_khr)(device.handle(), &unmap_info_vk)
|
||||||
|
.result()
|
||||||
|
.map_err(VulkanError::from)?;
|
||||||
|
} else {
|
||||||
|
(fns.v1_0.unmap_memory)(device.handle(), self.handle);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.mapping_state = None;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
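Taken together, `map`, `mapping_state`, and `unmap` replace the old `MappedDeviceMemory` workflow. A minimal sketch of the intended call sequence, assuming `memory` was allocated from a `HOST_VISIBLE` memory type (the function and variable names here are illustrative, not part of the commit):

```rust
use vulkano::memory::{DeviceMemory, MemoryMapInfo, MemoryUnmapInfo};

fn write_bytes(memory: &mut DeviceMemory, bytes: &[u8]) {
    let size = memory.allocation_size();
    assert!(bytes.len() as u64 <= size);

    // Mapping the whole allocation (offset 0, full size) always satisfies
    // the `non_coherent_atom_size` rules that `MemoryMapInfo::validate` checks.
    memory
        .map(MemoryMapInfo {
            offset: 0,
            size,
            ..Default::default()
        })
        .expect("failed to map the memory");

    let slice = memory
        .mapping_state()
        .unwrap()
        .slice(0..bytes.len() as u64)
        .expect("range lies within the mapping");

    // SAFETY: the memory was just mapped, no other host references to it
    // exist, and no device work is accessing it.
    unsafe {
        slice
            .as_ptr()
            .cast::<u8>()
            .copy_from_nonoverlapping(bytes.as_ptr(), bytes.len());
    }

    // For non-coherent memory, a `flush_range` call would go here (see below).
    memory
        .unmap(MemoryUnmapInfo::default())
        .expect("failed to unmap the memory");
}
```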
+    /// Invalidates the host cache for a range of mapped memory.
+    ///
+    /// If the device memory is not [host-coherent], you must call this function before the memory
+    /// is read by the host, if the device previously wrote to the memory. It has no effect if the
+    /// memory is host-coherent.
+    ///
+    /// # Safety
+    ///
+    /// - If there are memory writes by the device that have not been propagated into the host
+    /// cache, then there must not be any references in Rust code to any portion of the specified
+    /// `memory_range`.
+    ///
+    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
+    /// [`map`]: Self::map
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    #[inline]
+    pub unsafe fn invalidate_range(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), Validated<VulkanError>> {
+        self.validate_memory_range(&memory_range)?;
+
+        Ok(self.invalidate_range_unchecked(memory_range)?)
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn invalidate_range_unchecked(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), VulkanError> {
+        if self.is_coherent {
+            return Ok(());
+        }
+
+        let MappedMemoryRange {
+            offset,
+            size,
+            _ne: _,
+        } = memory_range;
+
+        let memory_range_vk = ash::vk::MappedMemoryRange {
+            memory: self.handle(),
+            offset,
+            size,
+            ..Default::default()
+        };
+
+        let fns = self.device().fns();
+        (fns.v1_0.invalidate_mapped_memory_ranges)(self.device().handle(), 1, &memory_range_vk)
+            .result()
+            .map_err(VulkanError::from)?;
+
+        Ok(())
+    }
+
+    /// Flushes the host cache for a range of mapped memory.
+    ///
+    /// If the device memory is not [host-coherent], you must call this function after writing to
+    /// the memory, if the device is going to read the memory. It has no effect if the memory is
+    /// host-coherent.
+    ///
+    /// # Safety
+    ///
+    /// - There must be no operations pending or executing in a device queue, that access the
+    /// specified `memory_range`.
+    ///
+    /// [host-coherent]: crate::memory::MemoryPropertyFlags::HOST_COHERENT
+    /// [`map`]: Self::map
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    #[inline]
+    pub unsafe fn flush_range(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), Validated<VulkanError>> {
+        self.validate_memory_range(&memory_range)?;
+
+        Ok(self.flush_range_unchecked(memory_range)?)
+    }
+
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn flush_range_unchecked(
+        &self,
+        memory_range: MappedMemoryRange,
+    ) -> Result<(), VulkanError> {
+        if self.is_coherent {
+            return Ok(());
+        }
+
+        let MappedMemoryRange {
+            offset,
+            size,
+            _ne: _,
+        } = memory_range;
+
+        let memory_range_vk = ash::vk::MappedMemoryRange {
+            memory: self.handle(),
+            offset,
+            size,
+            ..Default::default()
+        };
+
+        let fns = self.device().fns();
+        (fns.v1_0.flush_mapped_memory_ranges)(self.device().handle(), 1, &memory_range_vk)
+            .result()
+            .map_err(VulkanError::from)?;
+
+        Ok(())
+    }
+
+    // NOTE(Marc): We are validating the parameters regardless of whether the memory is
+    // non-coherent on purpose, to catch potential bugs arising because the code isn't tested on
+    // such hardware.
+    fn validate_memory_range(
+        &self,
+        memory_range: &MappedMemoryRange,
+    ) -> Result<(), Box<ValidationError>> {
+        memory_range
+            .validate(self)
+            .map_err(|err| err.add_context("memory_range"))?;
+
+        Ok(())
+    }
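Both functions require `offset` and `size` to respect `non_coherent_atom_size`, which `validate_memory_range` above enforces through `MappedMemoryRange::validate` (defined further down). A hedged sketch of widening an arbitrary byte range to valid atom boundaries before flushing; it assumes the whole allocation is currently mapped and that `start < end`, and the helper name is made up:

```rust
use vulkano::{
    device::DeviceOwned,
    memory::{DeviceMemory, MappedMemoryRange},
};

/// Flushes the byte range `start..end` (relative to the allocation start)
/// after a host write, rounding outward to atom-size boundaries.
unsafe fn flush_bytes(memory: &DeviceMemory, start: u64, end: u64) {
    let atom = memory
        .device()
        .physical_device()
        .properties()
        .non_coherent_atom_size
        .as_devicesize();

    // Round the start down and the end up; clamping the end to the
    // allocation size is accepted by `MappedMemoryRange::validate`.
    let offset = start / atom * atom;
    let end = ((end + atom - 1) / atom * atom).min(memory.allocation_size());

    memory
        .flush_range(MappedMemoryRange {
            offset,
            size: end - offset,
            ..Default::default()
        })
        .expect("failed to flush the mapped range");
}
```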
     /// Retrieves the amount of lazily-allocated memory that is currently committed to this
     /// memory object.
     ///

@@ -1034,7 +1360,7 @@ vulkan_bitflags_enum! {
 vulkan_bitflags! {
     #[non_exhaustive]

-    /// A mask specifying flags for device memory allocation.
+    /// Flags specifying additional properties of a device memory allocation.
     MemoryAllocateFlags = MemoryAllocateFlags(u32);

     /* TODO: enable

@@ -1054,6 +1380,306 @@ vulkan_bitflags! {
     DEVICE_ADDRESS_CAPTURE_REPLAY = DEVICE_ADDRESS_CAPTURE_REPLAY,*/
 }

+/// Parameters of a memory map operation.
+#[derive(Debug)]
+pub struct MemoryMapInfo {
+    /// The offset (in bytes) from the beginning of the `DeviceMemory`, where the mapping starts.
+    ///
+    /// Must be less than the [`allocation_size`] of the device memory. If the memory was not
+    /// allocated from [host-coherent] memory, then this must be a multiple of the
+    /// [`non_coherent_atom_size`] device property.
+    ///
+    /// The default value is `0`.
+    ///
+    /// [`allocation_size`]: DeviceMemory::allocation_size
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    pub offset: DeviceSize,
+
+    /// The size (in bytes) of the mapping.
+    ///
+    /// Must be less than or equal to the [`allocation_size`] of the device memory minus `offset`.
+    /// If the memory was not allocated from [host-coherent] memory, then this must be a
+    /// multiple of the [`non_coherent_atom_size`] device property, or be equal to the allocation
+    /// size minus `offset`.
+    ///
+    /// The default value is `0`, which must be overridden.
+    ///
+    /// [`allocation_size`]: DeviceMemory::allocation_size
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    pub size: DeviceSize,
+
+    pub _ne: crate::NonExhaustive,
+}
+
+impl MemoryMapInfo {
+    pub(crate) fn validate(&self, memory: &DeviceMemory) -> Result<(), Box<ValidationError>> {
+        let &Self {
+            offset,
+            size,
+            _ne: _,
+        } = self;
+
+        if !(offset < memory.allocation_size()) {
+            return Err(Box::new(ValidationError {
+                context: "offset".into(),
+                problem: "is not less than `self.allocation_size()`".into(),
+                vuids: &["VUID-vkMapMemory-offset-00679"],
+                ..Default::default()
+            }));
+        }
+
+        if size == 0 {
+            return Err(Box::new(ValidationError {
+                context: "size".into(),
+                problem: "is zero".into(),
+                vuids: &["VUID-vkMapMemory-size-00680"],
+                ..Default::default()
+            }));
+        }
+
+        if !(size <= memory.allocation_size() - offset) {
+            return Err(Box::new(ValidationError {
+                context: "size".into(),
+                problem: "is not less than or equal to `self.allocation_size()` minus `offset`"
+                    .into(),
+                vuids: &["VUID-vkMapMemory-size-00681"],
+                ..Default::default()
+            }));
+        }
+
+        let atom_size = memory.atom_size();
+
+        // Not required for merely mapping, but without this check the user can end up with
+        // parts of the mapped memory at the start and end that they're not able to
+        // invalidate/flush, which is probably unintended.
+        //
+        // NOTE(Marc): We also rely on this for soundness, because it is easier and more optimal to
+        // not have to worry about whether a range of mapped memory is still in bounds of the
+        // mapped memory after being aligned to the non-coherent atom size.
+        if !memory.is_coherent
+            && (!is_aligned(offset, atom_size)
+                || (!is_aligned(size, atom_size) && offset + size != memory.allocation_size()))
+        {
+            return Err(Box::new(ValidationError {
+                problem: "`self.memory_type_index()` refers to a memory type whose \
+                    `property_flags` does not contain `MemoryPropertyFlags::HOST_COHERENT`, and \
+                    `offset` and/or `size` are not aligned to the `non_coherent_atom_size` device \
+                    property"
+                    .into(),
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
+    }
+}
+
+impl Default for MemoryMapInfo {
+    #[inline]
+    fn default() -> Self {
+        MemoryMapInfo {
+            offset: 0,
+            size: 0,
+            _ne: crate::NonExhaustive(()),
+        }
+    }
+}
+
+/// Parameters of a memory unmap operation.
+#[derive(Debug)]
+pub struct MemoryUnmapInfo {
+    pub _ne: crate::NonExhaustive,
+}
+
+impl MemoryUnmapInfo {
+    pub(crate) fn validate(&self, _memory: &DeviceMemory) -> Result<(), Box<ValidationError>> {
+        let &Self { _ne: _ } = self;
+
+        Ok(())
+    }
+}
+
+impl Default for MemoryUnmapInfo {
+    #[inline]
+    fn default() -> Self {
+        MemoryUnmapInfo {
+            _ne: crate::NonExhaustive(()),
+        }
+    }
+}
+
+/// Represents the currently host-mapped region of a [`DeviceMemory`] block.
+#[derive(Debug)]
+pub struct MappingState {
+    ptr: NonNull<c_void>,
+    range: Range<DeviceSize>,
+}
+
+// It is safe to share `ptr` between threads because the user would have to use unsafe code
+// themself to get UB in the first place.
+unsafe impl Send for MappingState {}
+unsafe impl Sync for MappingState {}
+
+impl MappingState {
+    /// Returns the pointer to the start of the mapped memory. Meaning that the pointer is already
+    /// offset by the [`offset`].
+    ///
+    /// [`offset`]: Self::offset
+    #[inline]
+    pub fn ptr(&self) -> NonNull<c_void> {
+        self.ptr
+    }
+
+    /// Returns the offset given to [`DeviceMemory::map`].
+    #[inline]
+    pub fn offset(&self) -> DeviceSize {
+        self.range.start
+    }
+
+    /// Returns the size given to [`DeviceMemory::map`].
+    #[inline]
+    pub fn size(&self) -> DeviceSize {
+        self.range.end - self.range.start
+    }
+
+    /// Returns a pointer to a slice of the mapped memory. Returns `None` if out of bounds.
+    ///
+    /// `range` is specified in bytes relative to the start of the memory allocation, and must fall
+    /// within the range of the memory mapping given to [`DeviceMemory::map`].
+    ///
+    /// This function is safe in the sense that the returned pointer is guaranteed to be within
+    /// bounds of the mapped memory, however dereferencing the pointer isn't:
+    ///
+    /// - Normal Rust aliasing rules apply: if you create a mutable reference out of the pointer,
+    /// you must ensure that no other references exist in Rust to any portion of the same memory.
+    /// - While a reference created from the pointer exists, there must be no operations pending or
+    /// executing in any queue on the device, that write to any portion of the same memory.
+    /// - While a mutable reference created from the pointer exists, there must be no operations
+    /// pending or executing in any queue on the device, that read from any portion of the same
+    /// memory.
+    #[inline]
+    pub fn slice(&self, range: Range<DeviceSize>) -> Option<NonNull<[u8]>> {
+        if self.range.start <= range.start
+            && range.start <= range.end
+            && range.end <= self.range.end
+        {
+            // SAFETY: We checked that the range is within the currently mapped range.
+            Some(unsafe { self.slice_unchecked(range) })
+        } else {
+            None
+        }
+    }
+
+    /// # Safety
+    ///
+    /// - `range` must be within the currently mapped range.
+    #[cfg_attr(not(feature = "document_unchecked"), doc(hidden))]
+    #[inline]
+    pub unsafe fn slice_unchecked(&self, range: Range<DeviceSize>) -> NonNull<[u8]> {
+        let ptr = self.ptr.as_ptr();
+
+        // SAFETY: The caller must guarantee that `range` is within the currently mapped range,
+        // which means that the offset pointer and length must denote a slice that's contained
+        // within the allocated (mapped) object.
+        let ptr = ptr.add((range.start - self.range.start) as usize);
+        let len = (range.end - range.start) as usize;
+
+        let ptr = ptr::slice_from_raw_parts_mut(<*mut c_void>::cast::<u8>(ptr), len);
+
+        // SAFETY: The original pointer was non-null, and the caller must guarantee that `range`
+        // is within the currently mapped range, which means that the offset couldn't have wrapped
+        // around the address space.
+        NonNull::new_unchecked(ptr)
+    }
+}
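Note that the `range` parameter of `slice` is relative to the start of the allocation, not of the mapping; `slice_unchecked` subtracts `self.range.start` before offsetting the pointer. A hedged read-back sketch (assuming the memory is mapped, the mapping covers at least four bytes from its offset, the range was invalidated first if non-coherent, and the device is not writing to it):

```rust
use vulkano::memory::DeviceMemory;

/// Reads the first four mapped bytes as a little-endian `u32`.
fn read_u32(memory: &DeviceMemory) -> u32 {
    let state = memory.mapping_state().expect("memory is not host-mapped");

    // Start at the offset that was passed to `DeviceMemory::map`, since
    // ranges are measured from the start of the allocation.
    let offset = state.offset();
    let slice = state.slice(offset..offset + 4).unwrap();

    let mut bytes = [0u8; 4];
    // SAFETY: the pointer is in bounds of the mapping, and nothing else is
    // concurrently writing these bytes.
    unsafe {
        slice
            .as_ptr()
            .cast::<u8>()
            .copy_to_nonoverlapping(bytes.as_mut_ptr(), 4);
    }

    u32::from_le_bytes(bytes)
}
```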
+/// Represents a range of host-mapped [`DeviceMemory`] to be invalidated or flushed.
+///
+/// Must be contained within the currently mapped range of the device memory.
+#[derive(Debug)]
+pub struct MappedMemoryRange {
+    /// The offset (in bytes) from the beginning of the allocation, where the range starts.
+    ///
+    /// Must be a multiple of the [`non_coherent_atom_size`] device property.
+    ///
+    /// The default value is `0`.
+    ///
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    pub offset: DeviceSize,
+
+    /// The size (in bytes) of the range.
+    ///
+    /// Must be a multiple of the [`non_coherent_atom_size`] device property, or be equal to the
+    /// allocation size minus `offset`.
+    ///
+    /// The default value is `0`.
+    ///
+    /// [`non_coherent_atom_size`]: crate::device::Properties::non_coherent_atom_size
+    pub size: DeviceSize,
+
+    pub _ne: crate::NonExhaustive,
+}
+
+impl MappedMemoryRange {
+    pub(crate) fn validate(&self, memory: &DeviceMemory) -> Result<(), Box<ValidationError>> {
+        let &Self {
+            offset,
+            size,
+            _ne: _,
+        } = self;
+
+        if let Some(state) = &memory.mapping_state {
+            if !(state.range.start <= offset && size <= state.range.end - offset) {
+                return Err(Box::new(ValidationError {
+                    problem: "is not contained within the mapped range of this device memory"
+                        .into(),
+                    vuids: &["VUID-VkMappedMemoryRange-size-00685"],
+                    ..Default::default()
+                }));
+            }
+        } else {
+            return Err(Box::new(ValidationError {
+                problem: "this device memory is not currently host-mapped".into(),
+                vuids: &["VUID-VkMappedMemoryRange-memory-00684"],
+                ..Default::default()
+            }));
+        }
+
+        if !is_aligned(offset, memory.atom_size()) {
+            return Err(Box::new(ValidationError {
+                context: "offset".into(),
+                problem: "is not aligned to the `non_coherent_atom_size` device property".into(),
+                vuids: &["VUID-VkMappedMemoryRange-offset-00687"],
+                ..Default::default()
+            }));
+        }
+
+        if !(is_aligned(size, memory.atom_size()) || size == memory.allocation_size() - offset) {
+            return Err(Box::new(ValidationError {
+                context: "size".into(),
+                problem: "is not aligned to the `non_coherent_atom_size` device property nor \
+                    equal to `self.allocation_size()` minus `offset`"
+                    .into(),
+                vuids: &["VUID-VkMappedMemoryRange-size-01390"],
+                ..Default::default()
+            }));
+        }
+
+        Ok(())
+    }
+}
+
+impl Default for MappedMemoryRange {
+    #[inline]
+    fn default() -> Self {
+        MappedMemoryRange {
+            offset: 0,
+            size: 0,
+            _ne: crate::NonExhaustive(()),
+        }
+    }
+}
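The invalidate side mirrors the flush sketch above: before the host reads data that the device wrote to non-coherent memory, the range has to be invalidated. Covering the whole allocation (offset 0, full size) trivially passes the alignment checks; this hedged sketch assumes the whole allocation is currently mapped:

```rust
use vulkano::memory::{DeviceMemory, MappedMemoryRange};

/// Makes device writes visible to host reads. `invalidate_range_unchecked`
/// short-circuits on host-coherent memory, so this is then a no-op.
unsafe fn prepare_host_read(memory: &DeviceMemory) {
    // SAFETY requirement carried over from `invalidate_range`: no Rust
    // references into this range may exist while device writes are pending.
    memory
        .invalidate_range(MappedMemoryRange {
            offset: 0,
            size: memory.allocation_size(),
            ..Default::default()
        })
        .expect("failed to invalidate the mapped range");
}
```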
 /// Represents device memory that has been mapped in a CPU-accessible space.
 ///
 /// In order to access the contents of the allocated memory, you can use the `read` and `write`

@@ -1095,6 +1721,10 @@ vulkan_bitflags! {
 /// }
 /// ```
 #[derive(Debug)]
+#[deprecated(
+    since = "0.34.0",
+    note = "use the methods provided directly on `DeviceMemory` instead"
+)]
 pub struct MappedDeviceMemory {
     memory: DeviceMemory,
     pointer: *mut c_void, // points to `range.start`

@@ -1110,6 +1740,7 @@ pub struct MappedDeviceMemory {
 // Vulkan specs, documentation of `vkFreeMemory`:
 // > If a memory object is mapped at the time it is freed, it is implicitly unmapped.

+#[allow(deprecated)]
 impl MappedDeviceMemory {
     /// Maps a range of memory to be accessed by the CPU.
     ///

@@ -1165,8 +1796,14 @@ impl MappedDeviceMemory {
             }));
         }

-        // VUID-vkMapMemory-memory-00678
-        // Guaranteed because we take ownership of `memory`, no other mapping can exist.
+        if memory.mapping_state().is_some() {
+            return Err(Box::new(ValidationError {
+                context: "memory".into(),
+                problem: "is already host-mapped".into(),
+                vuids: &["VUID-vkMapMemory-memory-00678"],
+                ..Default::default()
+            }));
+        }
+
         if range.end > memory.allocation_size {
             return Err(Box::new(ValidationError {

@@ -1208,6 +1845,9 @@ impl MappedDeviceMemory {
         memory: DeviceMemory,
         range: Range<DeviceSize>,
     ) -> Result<Self, VulkanError> {
+        // Sanity check: this would lead to UB when calculating pointer offsets.
+        assert!(range.end - range.start <= isize::MAX.try_into().unwrap());
+
         let device = memory.device();

         let pointer = unsafe {

@@ -1480,6 +2120,7 @@ impl MappedDeviceMemory {
     }
 }

+#[allow(deprecated)]
 impl AsRef<DeviceMemory> for MappedDeviceMemory {
     #[inline]
     fn as_ref(&self) -> &DeviceMemory {

@@ -1487,6 +2128,7 @@ impl AsRef<DeviceMemory> for MappedDeviceMemory {
     }
 }

+#[allow(deprecated)]
 impl AsMut<DeviceMemory> for MappedDeviceMemory {
     #[inline]
     fn as_mut(&mut self) -> &mut DeviceMemory {

@@ -1494,6 +2136,7 @@ impl AsMut<DeviceMemory> for MappedDeviceMemory {
     }
 }

+#[allow(deprecated)]
 unsafe impl DeviceOwned for MappedDeviceMemory {
     #[inline]
     fn device(&self) -> &Arc<Device> {

@@ -1501,7 +2144,9 @@ unsafe impl DeviceOwned for MappedDeviceMemory {
     }
 }

+#[allow(deprecated)]
 unsafe impl Send for MappedDeviceMemory {}
+#[allow(deprecated)]
 unsafe impl Sync for MappedDeviceMemory {}

 #[cfg(test)]

@@ -15,11 +15,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 use std::{
     alloc::Layout,
     cmp::Ordering,
-    ffi::c_void,
     fmt::{Debug, Display, Formatter, Result as FmtResult},
     hash::{Hash, Hasher},
-    mem::{align_of, size_of, MaybeUninit},
+    mem::{size_of, MaybeUninit},
     ops::{Deref, DerefMut},
+    ptr::NonNull,
 };

 /// A newtype wrapper around `T`, with `N` bytes of trailing padding.

@@ -303,11 +303,10 @@ where
         panic!("zero-sized types are not valid buffer contents");
     };

-    unsafe fn from_ffi(data: *mut c_void, range: usize) -> *mut Self {
-        debug_assert!(range == size_of::<Self>());
-        debug_assert!(data as usize % align_of::<Self>() == 0);
+    unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Self {
+        debug_assert!(slice.len() == size_of::<Padded<T, N>>());

-        data.cast()
+        <*mut [u8]>::cast::<Padded<T, N>>(slice.as_ptr())
     }
 }
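The same `from_ffi` to `ptr_from_slice` change applies to the `BufferContents` impls for sized types; a hedged illustration of the new shape, using a made-up `#[repr(C)]` struct rather than code from this commit:

```rust
use std::{mem::size_of, ptr::NonNull};

#[repr(C)]
struct Vertex {
    position: [f32; 3],
}

// The sized-type case now starts from a fat `NonNull<[u8]>` instead of a
// `*mut c_void` plus a separate length.
unsafe fn ptr_from_slice(slice: NonNull<[u8]>) -> *mut Vertex {
    debug_assert!(slice.len() == size_of::<Vertex>());

    // `*mut [u8]` -> `*mut Vertex`, keeping the address of the first byte.
    <*mut [u8]>::cast::<Vertex>(slice.as_ptr())
}
```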
@@ -25,7 +25,7 @@ pub use self::{
         MemoryBarrier, PipelineStage, PipelineStages, QueueFamilyOwnershipTransfer,
     },
 };
-use crate::{device::Queue, ValidationError, VulkanError};
+use crate::{device::Queue, VulkanError};
 use std::{
     error::Error,
     fmt::{Display, Formatter},

@@ -104,7 +104,8 @@ pub(crate) enum CurrentAccess {
 pub enum HostAccessError {
     AccessConflict(AccessConflict),
     Invalidate(VulkanError),
-    ValidationError(Box<ValidationError>),
+    NotHostMapped,
+    OutOfMappedRange,
 }

 impl Error for HostAccessError {

@@ -112,7 +113,7 @@ impl Error for HostAccessError {
         match self {
             Self::AccessConflict(err) => Some(err),
             Self::Invalidate(err) => Some(err),
-            Self::ValidationError(err) => Some(err),
+            _ => None,
         }
     }
 }

@@ -124,7 +125,13 @@ impl Display for HostAccessError {
                 write!(f, "the resource is already in use in a conflicting way")
             }
             HostAccessError::Invalidate(_) => write!(f, "invalidating the device memory failed"),
-            HostAccessError::ValidationError(_) => write!(f, "validation error"),
+            HostAccessError::NotHostMapped => {
+                write!(f, "the device memory is not currently host-mapped")
+            }
+            HostAccessError::OutOfMappedRange => write!(
+                f,
+                "the requested range is not within the currently mapped range of device memory",
+            ),
         }
     }
 }
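Code that previously matched `HostAccessError::ValidationError` now gets the two specific variants instead. A hedged sketch of handling them at a typical call site such as `Subbuffer::read` (the handling itself is illustrative):

```rust
use vulkano::{buffer::Subbuffer, sync::HostAccessError};

fn print_first(subbuffer: &Subbuffer<[u32]>) {
    match subbuffer.read() {
        Ok(guard) => println!("first value: {}", guard[0]),
        Err(HostAccessError::AccessConflict(_)) => {
            // The device, or another host access, is still using the buffer.
        }
        Err(HostAccessError::NotHostMapped | HostAccessError::OutOfMappedRange) => {
            // The new variants: the memory isn't mapped at all, or the
            // subbuffer's range falls outside the mapped range.
        }
        Err(HostAccessError::Invalidate(err)) => {
            eprintln!("invalidating the device memory failed: {err}");
        }
    }
}
```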