Allow external memory support with DeviceLocalBuffers (#1506)

* Add posix handle type shortcut

* Expose device local buffer's memory

* Add exportable memory option for pools & device local memory

* Add test for device local with exportable fd

* Use a convenience function in test

* Don't expose memory, but instead allow exporting the fd directly

* Fix mistakes & clean up

* Remove test due to IncompatibleDriver error
Okko Hakola 2021-03-14 15:56:27 +02:00 committed by GitHub
parent 1faa0c0668
commit 0e581c07b2
9 changed files with 581 additions and 92 deletions

.gitignore

@@ -3,3 +3,4 @@ target
.cargo
examples/**/triangle.png
examples/**/mandelbrot.png
.idea


@@ -1,4 +1,5 @@
# Unreleased
- Added external memory support for `DeviceLocalBuffer` on Linux (see the usage sketch below)
- Fixed `shader!` generated descriptor set layouts for shader modules with multiple entrypoints.
- **Breaking** Prefixed `shader!` generated descriptor set `Layout` structs with the name of the entrypoint the layout belongs to. For shaders generated from GLSL source, this means `Layout` has been renamed to `MainLayout`.
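For context, a minimal sketch of the new API added by this commit, assuming a Linux device created with the `khr_external_memory` and `khr_external_memory_fd` extensions and an already-initialized `device`; all bindings here are illustrative and not part of the diff:

use vulkano::buffer::{BufferUsage, DeviceLocalBuffer};

// Safety: `raw_with_exportable_fd` is unsafe for the same reasons as `raw`.
let buffer = unsafe {
    DeviceLocalBuffer::<[u8]>::raw_with_exportable_fd(
        device.clone(),
        1024, // size in bytes
        BufferUsage::all(),
        device.active_queue_families(),
    )
}?;

// Export the backing memory as a POSIX file descriptor, e.g. for sharing
// the allocation with another API.
let fd: std::fs::File = buffer.export_posix_fd()?;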


@@ -40,8 +40,9 @@ use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use memory::{DedicatedAlloc, MemoryRequirements};
use memory::{DeviceMemoryAllocError, ExternalMemoryHandleType};
use std::fs::File;
use sync::AccessError;
use sync::Sharing;
@@ -132,20 +133,7 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
} else {
Sharing::Exclusive
};
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::AllocError(err)) => return Err(err),
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
let (buffer, mem_reqs) = Self::build_buffer(&device, size, usage, &queue_families)?;
let mem = MemoryPool::alloc_from_requirements(
&Device::standard_pool(&device),
@@ -172,6 +160,86 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
marker: PhantomData,
}))
}
/// Same as `raw`, but the allocated memory can be exported as a POSIX file descriptor on Linux.
#[cfg(target_os = "linux")]
pub unsafe fn raw_with_exportable_fd<'a, I>(
device: Arc<Device>,
size: usize,
usage: BufferUsage,
queue_families: I,
) -> Result<Arc<DeviceLocalBuffer<T>>, DeviceMemoryAllocError>
where
I: IntoIterator<Item = QueueFamily<'a>>,
{
assert!(device.loaded_extensions().khr_external_memory_fd);
assert!(device.loaded_extensions().khr_external_memory);
let queue_families = queue_families
.into_iter()
.map(|f| f.id())
.collect::<SmallVec<[u32; 4]>>();
let (buffer, mem_reqs) = Self::build_buffer(&device, size, usage, &queue_families)?;
let mem = MemoryPool::alloc_from_requirements_with_exportable_fd(
&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::DoNotMap,
DedicatedAlloc::Buffer(&buffer),
|t| {
if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
}
},
)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
buffer.bind_memory(mem.memory(), mem.offset())?;
Ok(Arc::new(DeviceLocalBuffer {
inner: buffer,
memory: mem,
queue_families,
gpu_lock: Mutex::new(GpuAccess::None),
marker: PhantomData,
}))
}
unsafe fn build_buffer(
device: &Arc<Device>,
size: usize,
usage: BufferUsage,
queue_families: &SmallVec<[u32; 4]>,
) -> Result<(UnsafeBuffer, MemoryRequirements), DeviceMemoryAllocError> {
let (buffer, mem_reqs) = {
let sharing = if queue_families.len() >= 2 {
Sharing::Concurrent(queue_families.iter().cloned())
} else {
Sharing::Exclusive
};
match UnsafeBuffer::new(device.clone(), size, usage, sharing, SparseLevel::none()) {
Ok(b) => b,
Err(BufferCreationError::AllocError(err)) => return Err(err),
Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
// errors can't happen
}
};
Ok((buffer, mem_reqs))
}
/// Exports a POSIX file descriptor for the allocated memory.
/// Requires the `khr_external_memory_fd` and `khr_external_memory` extensions to be loaded.
/// Only available on Linux.
#[cfg(target_os = "linux")]
pub fn export_posix_fd(&self) -> Result<File, DeviceMemoryAllocError> {
self.memory
.memory()
.export_fd(ExternalMemoryHandleType::posix())
}
}
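The `assert!` calls in `raw_with_exportable_fd` above require a device created with both external-memory extensions. A minimal sketch of such a setup, where `physical` stands in for an already-selected `PhysicalDevice`:

use vulkano::device::{Device, DeviceExtensions, Features};

let extensions = DeviceExtensions {
    khr_external_memory: true,
    khr_external_memory_fd: true,
    ..DeviceExtensions::none()
};
let (device, mut queues) = Device::new(
    physical,
    &Features::none(),
    &extensions,
    physical.queue_families().map(|qf| (qf, 0.5)),
)?;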
impl<T: ?Sized, A> DeviceLocalBuffer<T, A> {


@@ -323,8 +323,87 @@ impl DeviceMemory {
assert!(memory_type.is_host_visible());
let mem = DeviceMemory::dedicated_alloc(device.clone(), memory_type, size, resource)?;
let coherent = memory_type.is_host_coherent();
Self::map_allocation(device.clone(), mem)
}
/// Same as `alloc`, but the memory can be exported as a file descriptor on Linux.
#[inline]
#[cfg(target_os = "linux")]
pub fn alloc_with_exportable_fd(
device: Arc<Device>,
memory_type: MemoryType,
size: usize,
) -> Result<DeviceMemory, DeviceMemoryAllocError> {
DeviceMemoryBuilder::new(device, memory_type, size)
.export_info(ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
})
.build()
}
/// Same as `dedicated_alloc`, but the memory can be exported as a file descriptor on Linux.
#[inline]
#[cfg(target_os = "linux")]
pub fn dedicated_alloc_with_exportable_fd(
device: Arc<Device>,
memory_type: MemoryType,
size: usize,
resource: DedicatedAlloc,
) -> Result<DeviceMemory, DeviceMemoryAllocError> {
DeviceMemoryBuilder::new(device, memory_type, size)
.export_info(ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
})
.dedicated_info(resource)
.build()
}
/// Same as `alloc_and_map`, but the memory can be exported as a file descriptor on Linux.
#[inline]
#[cfg(target_os = "linux")]
pub fn alloc_and_map_with_exportable_fd(
device: Arc<Device>,
memory_type: MemoryType,
size: usize,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
DeviceMemory::dedicated_alloc_and_map_with_exportable_fd(
device,
memory_type,
size,
DedicatedAlloc::None,
)
}
/// Same as `dedicated_alloc_and_map`, but the memory can be exported as a file descriptor on Linux.
#[inline]
#[cfg(target_os = "linux")]
pub fn dedicated_alloc_and_map_with_exportable_fd(
device: Arc<Device>,
memory_type: MemoryType,
size: usize,
resource: DedicatedAlloc,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
assert!(memory_type.is_host_visible());
let mem = DeviceMemory::dedicated_alloc_with_exportable_fd(
device.clone(),
memory_type,
size,
resource,
)?;
Self::map_allocation(device.clone(), mem)
}
fn map_allocation(
device: Arc<Device>,
mem: DeviceMemory,
) -> Result<MappedDeviceMemory, DeviceMemoryAllocError> {
let vk = device.pointers();
let coherent = mem.memory_type().is_host_coherent();
let ptr = unsafe {
let mut output = MaybeUninit::uninit();
check_errors(vk.MapMemory(
@@ -341,7 +420,7 @@ impl DeviceMemory {
Ok(MappedDeviceMemory {
memory: mem,
pointer: ptr,
coherent: coherent,
coherent,
})
}
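Taken together, these constructors also allow allocating exportable memory directly, without going through a buffer. A hedged sketch, assuming the same extension-enabled `device` as above:

use vulkano::memory::{DeviceMemory, ExternalMemoryHandleType};

// Pick any device-local memory type; a real caller would match the
// allocation's memory requirements instead.
let memory_type = device
    .physical_device()
    .memory_types()
    .find(|t| t.is_device_local())
    .expect("no device-local memory type");
let memory = DeviceMemory::alloc_with_exportable_fd(device.clone(), memory_type, 64 * 1024)?;
// `export_fd` wraps the opaque fd in a `std::fs::File`.
let file = memory.export_fd(ExternalMemoryHandleType::posix())?;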


@@ -60,6 +60,23 @@ impl ExternalMemoryHandleType {
}
}
/// Builds an `ExternalMemoryHandleType` for a POSIX file descriptor.
///
/// # Example
///
/// ```rust
/// use vulkano::memory::ExternalMemoryHandleType;
///
/// let _handle_type = ExternalMemoryHandleType::posix();
/// ```
#[inline]
pub fn posix() -> ExternalMemoryHandleType {
ExternalMemoryHandleType {
opaque_fd: true,
..ExternalMemoryHandleType::none()
}
}
#[inline]
pub(crate) fn to_bits(&self) -> vk::ExternalMemoryHandleTypeFlagBits {
let mut result = 0;


@@ -127,6 +127,76 @@ impl StdHostVisibleMemoryTypePool {
})
}
/// Same as `alloc`, but the memory can be exported as a file descriptor on Linux.
#[cfg(target_os = "linux")]
pub fn alloc_with_exportable_fd(
me: &Arc<Self>,
size: usize,
alignment: usize,
) -> Result<StdHostVisibleMemoryTypePoolAlloc, DeviceMemoryAllocError> {
assert!(size != 0);
assert!(alignment != 0);
#[inline]
fn align(val: usize, al: usize) -> usize {
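// Rounds `val` up to the nearest multiple of `al`.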
al * (1 + (val - 1) / al)
}
// Find a location.
let mut occupied = me.occupied.lock().unwrap();
// Try finding an entry in already-allocated chunks.
for &mut (ref dev_mem, ref mut entries) in occupied.iter_mut() {
// Try to find some free space between two entries.
for i in 0..entries.len().saturating_sub(1) {
let entry1 = entries[i].clone();
let entry1_end = align(entry1.end, alignment);
let entry2 = entries[i + 1].clone();
if entry1_end + size <= entry2.start {
entries.insert(i + 1, entry1_end..entry1_end + size);
return Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size,
});
}
}
// Try appending at the end.
let last_end = entries.last().map(|e| align(e.end, alignment)).unwrap_or(0);
if last_end + size <= (**dev_mem).as_ref().size() {
entries.push(last_end..last_end + size);
return Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size,
});
}
}
// We need to allocate a new block.
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block = DeviceMemory::alloc_and_map_with_exportable_fd(
me.device.clone(),
me.memory_type(),
to_alloc,
)?;
Arc::new(new_block)
};
occupied.push((new_block.clone(), vec![0..size]));
Ok(StdHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: new_block,
offset: 0,
size,
})
}
/// Returns the device this pool operates on.
#[inline]
pub fn device(&self) -> &Arc<Device> {


@@ -7,7 +7,7 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use device::DeviceOwned;
use device::{Device, DeviceOwned};
use instance::MemoryType;
use memory::DedicatedAlloc;
use memory::DeviceMemory;
@@ -21,11 +21,47 @@ pub use self::non_host_visible::StdNonHostVisibleMemoryTypePool;
pub use self::non_host_visible::StdNonHostVisibleMemoryTypePoolAlloc;
pub use self::pool::StdMemoryPool;
pub use self::pool::StdMemoryPoolAlloc;
use std::sync::Arc;
mod host_visible;
mod non_host_visible;
mod pool;
fn choose_allocation_memory_type<'s, F>(
device: &'s Arc<Device>,
requirements: &MemoryRequirements,
mut filter: F,
map: MappingRequirement,
) -> MemoryType<'s>
where
F: FnMut(MemoryType) -> AllocFromRequirementsFilter,
{
let mem_ty = {
let mut filter = |ty: MemoryType| {
if map == MappingRequirement::Map && !ty.is_host_visible() {
return AllocFromRequirementsFilter::Forbidden;
}
filter(ty)
};
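// Two passes over the memory types: the first pass accepts only types the
// filter marks `Preferred`, the second falls back to those marked `Allowed`.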
let first_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Preferred));
let second_loop = device
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Allowed));
first_loop
.chain(second_loop)
.filter(|&(t, _)| (requirements.memory_type_bits & (1 << t.id())) != 0)
.filter(|&(t, rq)| filter(t) == rq)
.next()
.expect("Couldn't find a memory type to allocate from")
.0
};
mem_ty
}
/// Pool of GPU-visible memory that can be allocated from.
pub unsafe trait MemoryPool: DeviceOwned {
/// Object that represents a single allocation. Its destructor should free the chunk.
@@ -61,6 +97,17 @@ pub unsafe trait MemoryPool: DeviceOwned {
map: MappingRequirement,
) -> Result<Self::Alloc, DeviceMemoryAllocError>;
/// Same as `alloc_generic`, but the memory can be exported as a file descriptor.
#[cfg(target_os = "linux")]
fn alloc_generic_with_exportable_fd(
&self,
ty: MemoryType,
size: usize,
alignment: usize,
layout: AllocLayout,
map: MappingRequirement,
) -> Result<Self::Alloc, DeviceMemoryAllocError>;
/// Chooses a memory type and allocates memory from it.
///
/// Contrary to `alloc_generic`, this function may allocate a whole new block of memory
@@ -96,37 +143,13 @@ pub unsafe trait MemoryPool: DeviceOwned {
layout: AllocLayout,
map: MappingRequirement,
dedicated: DedicatedAlloc,
mut filter: F,
filter: F,
) -> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
where
F: FnMut(MemoryType) -> AllocFromRequirementsFilter,
{
// Choose a suitable memory type.
let mem_ty = {
let mut filter = |ty: MemoryType| {
if map == MappingRequirement::Map && !ty.is_host_visible() {
return AllocFromRequirementsFilter::Forbidden;
}
filter(ty)
};
let first_loop = self
.device()
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Preferred));
let second_loop = self
.device()
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Allowed));
first_loop
.chain(second_loop)
.filter(|&(t, _)| (requirements.memory_type_bits & (1 << t.id())) != 0)
.filter(|&(t, rq)| filter(t) == rq)
.next()
.expect("Couldn't find a memory type to allocate from")
.0
};
let mem_ty = choose_allocation_memory_type(self.device(), requirements, filter, map);
// Redirect to `self.alloc_generic` if we don't perform a dedicated allocation.
if !requirements.prefer_dedicated
@@ -174,6 +197,69 @@ pub unsafe trait MemoryPool: DeviceOwned {
}
}
}
/// Same as `alloc_from_requirements`, but with an exportable file descriptor option on Linux.
#[cfg(target_os = "linux")]
fn alloc_from_requirements_with_exportable_fd<F>(
&self,
requirements: &MemoryRequirements,
layout: AllocLayout,
map: MappingRequirement,
dedicated: DedicatedAlloc,
filter: F,
) -> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
where
F: FnMut(MemoryType) -> AllocFromRequirementsFilter,
{
assert!(self.device().loaded_extensions().khr_external_memory_fd);
assert!(self.device().loaded_extensions().khr_external_memory);
let mem_ty = choose_allocation_memory_type(self.device(), requirements, filter, map);
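// Use a plain pool allocation when a dedicated allocation is not preferred
// or the `khr_dedicated_allocation` extension is not loaded.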
if !requirements.prefer_dedicated
|| !self.device().loaded_extensions().khr_dedicated_allocation
{
let alloc = self.alloc_generic_with_exportable_fd(
mem_ty,
requirements.size,
requirements.alignment,
layout,
map,
)?;
return Ok(alloc.into());
}
if let DedicatedAlloc::None = dedicated {
let alloc = self.alloc_generic_with_exportable_fd(
mem_ty,
requirements.size,
requirements.alignment,
layout,
map,
)?;
return Ok(alloc.into());
}
match map {
MappingRequirement::Map => {
let mem = DeviceMemory::dedicated_alloc_and_map_with_exportable_fd(
self.device().clone(),
mem_ty,
requirements.size,
dedicated,
)?;
Ok(PotentialDedicatedAllocation::DedicatedMapped(mem))
}
MappingRequirement::DoNotMap => {
let mem = DeviceMemory::dedicated_alloc_with_exportable_fd(
self.device().clone(),
mem_ty,
requirements.size,
dedicated,
)?;
Ok(PotentialDedicatedAllocation::Dedicated(mem))
}
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]


@@ -126,6 +126,76 @@ impl StdNonHostVisibleMemoryTypePool {
})
}
/// Same as `alloc`, but the memory can be exported as a file descriptor on Linux.
#[cfg(target_os = "linux")]
pub fn alloc_with_exportable_fd(
me: &Arc<Self>,
size: usize,
alignment: usize,
) -> Result<StdNonHostVisibleMemoryTypePoolAlloc, DeviceMemoryAllocError> {
assert!(size != 0);
assert!(alignment != 0);
#[inline]
fn align(val: usize, al: usize) -> usize {
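// Rounds `val` up to the nearest multiple of `al`.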
al * (1 + (val - 1) / al)
}
// Find a location.
let mut occupied = me.occupied.lock().unwrap();
// Try finding an entry in already-allocated chunks.
for &mut (ref dev_mem, ref mut entries) in occupied.iter_mut() {
// Try to find some free space between two entries.
for i in 0..entries.len().saturating_sub(1) {
let entry1 = entries[i].clone();
let entry1_end = align(entry1.end, alignment);
let entry2 = entries[i + 1].clone();
if entry1_end + size <= entry2.start {
entries.insert(i + 1, entry1_end..entry1_end + size);
return Ok(StdNonHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: entry1_end,
size,
});
}
}
// Try appending at the end.
let last_end = entries.last().map(|e| align(e.end, alignment)).unwrap_or(0);
if last_end + size <= dev_mem.size() {
entries.push(last_end..last_end + size);
return Ok(StdNonHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: dev_mem.clone(),
offset: last_end,
size,
});
}
}
// We need to allocate a new block.
let new_block = {
const MIN_BLOCK_SIZE: usize = 8 * 1024 * 1024; // 8 MB
let to_alloc = cmp::max(MIN_BLOCK_SIZE, size.next_power_of_two());
let new_block = DeviceMemory::alloc_with_exportable_fd(
me.device.clone(),
me.memory_type(),
to_alloc,
)?;
Arc::new(new_block)
};
occupied.push((new_block.clone(), vec![0..size]));
Ok(StdNonHostVisibleMemoryTypePoolAlloc {
pool: me.clone(),
memory: new_block,
offset: 0,
size,
})
}
/// Returns the device this pool operates on.
#[inline]
pub fn device(&self) -> &Arc<Device> {


@@ -52,6 +52,130 @@ impl StdMemoryPool {
}
}
fn generic_allocation(
mem_pool: Arc<StdMemoryPool>,
memory_type: MemoryType,
size: usize,
alignment: usize,
layout: AllocLayout,
map: MappingRequirement,
) -> Result<StdMemoryPoolAlloc, DeviceMemoryAllocError> {
let mut pools = mem_pool.pools.lock().unwrap();
let memory_type_host_visible = memory_type.is_host_visible();
assert!(memory_type_host_visible || map == MappingRequirement::DoNotMap);
match pools.entry((memory_type.id(), layout, map)) {
Entry::Occupied(entry) => match entry.get() {
&Pool::HostVisible(ref pool) => {
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
}
&Pool::NonHostVisible(ref pool) => {
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
}
},
Entry::Vacant(entry) => {
if memory_type_host_visible {
let pool = StdHostVisibleMemoryTypePool::new(mem_pool.device.clone(), memory_type);
entry.insert(Pool::HostVisible(pool.clone()));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
} else {
let pool =
StdNonHostVisibleMemoryTypePool::new(mem_pool.device.clone(), memory_type);
entry.insert(Pool::NonHostVisible(pool.clone()));
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
}
}
}
}
/// Same as `generic_allocation`, but the memory can be exported as a file descriptor on Linux.
#[cfg(target_os = "linux")]
fn generic_allocation_with_exportable_fd(
mem_pool: Arc<StdMemoryPool>,
memory_type: MemoryType,
size: usize,
alignment: usize,
layout: AllocLayout,
map: MappingRequirement,
) -> Result<StdMemoryPoolAlloc, DeviceMemoryAllocError> {
let mut pools = mem_pool.pools.lock().unwrap();
let memory_type_host_visible = memory_type.is_host_visible();
assert!(memory_type_host_visible || map == MappingRequirement::DoNotMap);
match pools.entry((memory_type.id(), layout, map)) {
Entry::Occupied(entry) => match entry.get() {
&Pool::HostVisible(ref pool) => {
let alloc =
StdHostVisibleMemoryTypePool::alloc_with_exportable_fd(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
}
&Pool::NonHostVisible(ref pool) => {
let alloc = StdNonHostVisibleMemoryTypePool::alloc_with_exportable_fd(
&pool, size, alignment,
)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
}
},
Entry::Vacant(entry) => {
if memory_type_host_visible {
let pool = StdHostVisibleMemoryTypePool::new(mem_pool.device.clone(), memory_type);
entry.insert(Pool::HostVisible(pool.clone()));
let alloc =
StdHostVisibleMemoryTypePool::alloc_with_exportable_fd(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
} else {
let pool =
StdNonHostVisibleMemoryTypePool::new(mem_pool.device.clone(), memory_type);
entry.insert(Pool::NonHostVisible(pool.clone()));
let alloc = StdNonHostVisibleMemoryTypePool::alloc_with_exportable_fd(
&pool, size, alignment,
)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner,
pool: mem_pool.clone(),
})
}
}
}
}
unsafe impl MemoryPool for Arc<StdMemoryPool> {
type Alloc = StdMemoryPoolAlloc;
@@ -63,54 +187,27 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
layout: AllocLayout,
map: MappingRequirement,
) -> Result<StdMemoryPoolAlloc, DeviceMemoryAllocError> {
let mut pools = self.pools.lock().unwrap();
generic_allocation(self.clone(), memory_type, size, alignment, layout, map)
}
let memory_type_host_visible = memory_type.is_host_visible();
assert!(memory_type_host_visible || map == MappingRequirement::DoNotMap);
match pools.entry((memory_type.id(), layout, map)) {
Entry::Occupied(entry) => match entry.get() {
&Pool::HostVisible(ref pool) => {
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
}
&Pool::NonHostVisible(ref pool) => {
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
}
},
Entry::Vacant(entry) => {
if memory_type_host_visible {
let pool = StdHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
entry.insert(Pool::HostVisible(pool.clone()));
let alloc = StdHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::HostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
} else {
let pool =
StdNonHostVisibleMemoryTypePool::new(self.device.clone(), memory_type);
entry.insert(Pool::NonHostVisible(pool.clone()));
let alloc = StdNonHostVisibleMemoryTypePool::alloc(&pool, size, alignment)?;
let inner = StdMemoryPoolAllocInner::NonHostVisible(alloc);
Ok(StdMemoryPoolAlloc {
inner: inner,
pool: self.clone(),
})
}
}
}
/// Same as `alloc_generic`, but with an exportable file descriptor option on Linux.
#[cfg(target_os = "linux")]
fn alloc_generic_with_exportable_fd(
&self,
memory_type: MemoryType,
size: usize,
alignment: usize,
layout: AllocLayout,
map: MappingRequirement,
) -> Result<StdMemoryPoolAlloc, DeviceMemoryAllocError> {
generic_allocation_with_exportable_fd(
self.clone(),
memory_type,
size,
alignment,
layout,
map,
)
}
}