Finish implementing dedicated allocation (#731)

* Finish implementing dedicated allocation

* Fix mapping inversion

* Remove arbitrary dedicated alloc determination

* Turn alloc() into alloc_from_requirements()
tomaka 2017-08-11 10:24:53 +02:00 committed by GitHub
parent 18559a94cb
commit a64666b62f
10 changed files with 261 additions and 133 deletions

@@ -44,18 +44,21 @@ use device::Queue;
use instance::QueueFamily;
use memory::Content;
use memory::CpuAccess as MemCpuAccess;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use sync::AccessError;
use sync::Sharing;
/// Buffer whose content is accessible by the CPU.
#[derive(Debug)]
pub struct CpuAccessibleBuffer<T: ?Sized, A = StdMemoryPoolAlloc> {
pub struct CpuAccessibleBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
// Inner content.
inner: UnsafeBuffer,
@@ -194,20 +197,12 @@ impl<T: ?Sized> CpuAccessibleBuffer<T> {
}
};
let mem_ty = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next()
.unwrap(); // Vk specs guarantee that this can't fail
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::Map)?;
MappingRequirement::Map,
DedicatedAlloc::Buffer(&buffer),
|_| AllocFromRequirementsFilter::Allowed)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
buffer.bind_memory(mem.memory(), mem.offset())?;
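Note that the call site no longer picks a host-visible memory type by hand: with `MappingRequirement::Map`, `alloc_from_requirements` filters out non-host-visible memory types automatically, so the filter closure can simply allow everything that remains.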

@@ -30,10 +30,13 @@ use device::Device;
use device::DeviceOwned;
use device::Queue;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::pool::AllocLayout;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPool;
use memory::DeviceMemoryAllocError;
use sync::AccessError;
@@ -93,7 +96,7 @@ struct ActualBuffer<A>
inner: UnsafeBuffer,
// The memory held by the buffer.
memory: A::Alloc,
memory: PotentialDedicatedAllocation<A::Alloc>,
// List of the chunks that are reserved.
chunks_in_use: Mutex<Vec<ActualBufferChunk>>,
@@ -324,20 +327,12 @@ impl<T, A> CpuBufferPool<T, A>
}
};
let mem_ty = self.device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_host_visible())
.next()
.unwrap(); // Vk specs guarantee that this can't fail
let mem = MemoryPool::alloc(&self.pool,
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
let mem = MemoryPool::alloc_from_requirements(&self.pool,
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::Map)?;
MappingRequirement::Map,
DedicatedAlloc::Buffer(&buffer),
|_| AllocFromRequirementsFilter::Allowed)?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
debug_assert!(mem.mapped_memory().is_some());
buffer.bind_memory(mem.memory(), mem.offset())?;

@@ -30,10 +30,13 @@ use device::Device;
use device::DeviceOwned;
use device::Queue;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::pool::AllocLayout;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use memory::DeviceMemoryAllocError;
use sync::AccessError;
@@ -48,7 +51,7 @@ use sync::Sharing;
/// The `DeviceLocalBuffer` will be in device-local memory, unless the device doesn't provide any
/// device-local memory.
#[derive(Debug)]
pub struct DeviceLocalBuffer<T: ?Sized, A = StdMemoryPoolAlloc> {
pub struct DeviceLocalBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
// Inner content.
inner: UnsafeBuffer,
@@ -128,25 +131,16 @@ impl<T: ?Sized> DeviceLocalBuffer<T> {
}
};
let mem_ty = {
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::DoNotMap)?;
MappingRequirement::DoNotMap,
DedicatedAlloc::Buffer(&buffer),
|t| if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
})?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
buffer.bind_memory(mem.memory(), mem.offset())?;
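The filter closure replaces the old two-step search for a memory type: device-local types are marked `Preferred` and thus win the first pass of the scan, while any other compatible type is kept as a fallback.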

@@ -42,11 +42,14 @@ use device::Device;
use device::DeviceOwned;
use device::Queue;
use instance::QueueFamily;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use sync::AccessError;
use sync::NowFuture;
@@ -54,7 +57,7 @@ use sync::Sharing;
/// Buffer that is written once then read for as long as it is alive.
// TODO: implement Debug
pub struct ImmutableBuffer<T: ?Sized, A = StdMemoryPoolAlloc> {
pub struct ImmutableBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
// Inner content.
inner: UnsafeBuffer,
@@ -252,25 +255,16 @@ impl<T: ?Sized> ImmutableBuffer<T> {
}
};
let mem_ty = {
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
AllocLayout::Linear,
MappingRequirement::DoNotMap)?;
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Linear,
MappingRequirement::DoNotMap,
DedicatedAlloc::Buffer(&buffer),
|t| if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
})?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
buffer.bind_memory(mem.memory(), mem.offset())?;
@@ -368,7 +362,7 @@ unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBuffer<T, A> {
/// Access to the immutable buffer that can be used for the initial upload.
//#[derive(Debug)] // TODO:
pub struct ImmutableBufferInitialization<T: ?Sized, A = StdMemoryPoolAlloc> {
pub struct ImmutableBufferInitialization<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
buffer: Arc<ImmutableBuffer<T, A>>,
used: Arc<AtomicBool>,
}

@@ -31,10 +31,13 @@ use image::traits::ImageAccess;
use image::traits::ImageClearValue;
use image::traits::ImageContent;
use image::traits::ImageViewAccess;
use memory::DedicatedAlloc;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use sync::AccessError;
use sync::Sharing;
@@ -69,7 +72,7 @@ use sync::Sharing;
///
// TODO: forbid reading transient images outside render passes?
#[derive(Debug)]
pub struct AttachmentImage<F = Format, A = StdMemoryPoolAlloc> {
pub struct AttachmentImage<F = Format, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
// Inner implementation.
image: UnsafeImage,
@@ -364,25 +367,16 @@ impl<F> AttachmentImage<F> {
false)?
};
let mem_ty = {
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Optimal,
MappingRequirement::DoNotMap)?;
MappingRequirement::DoNotMap,
DedicatedAlloc::Image(&image),
|t| if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
})?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
unsafe {
image.bind_memory(mem.memory(), mem.offset())?;

@@ -38,10 +38,13 @@ use image::traits::ImageAccess;
use image::traits::ImageContent;
use image::traits::ImageViewAccess;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::AllocLayout;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPoolAlloc;
use sync::AccessError;
use sync::Sharing;
@@ -51,7 +54,7 @@ use sync::NowFuture;
/// but then you must only ever read from it.
// TODO: type (2D, 3D, array, etc.) as template parameter
#[derive(Debug)]
pub struct ImmutableImage<F, A = StdMemoryPoolAlloc> {
pub struct ImmutableImage<F, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
image: UnsafeImage,
view: UnsafeImageView,
dimensions: Dimensions,
@@ -62,7 +65,7 @@ pub struct ImmutableImage<F, A = StdMemoryPoolAlloc> {
}
// Must not implement Clone, as that would lead to multiple `used` values.
pub struct ImmutableImageInitialization<F, A = StdMemoryPoolAlloc> {
pub struct ImmutableImageInitialization<F, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
image: Arc<ImmutableImage<F, A>>,
used: AtomicBool,
}
@@ -137,25 +140,16 @@ impl<F> ImmutableImage<F> {
false)?
};
let mem_ty = {
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Optimal,
MappingRequirement::DoNotMap)?;
MappingRequirement::DoNotMap,
DedicatedAlloc::Image(&image),
|t| if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
})?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
unsafe {
image.bind_memory(mem.memory(), mem.offset())?;

@@ -30,10 +30,13 @@ use image::traits::ImageClearValue;
use image::traits::ImageContent;
use image::traits::ImageViewAccess;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::pool::AllocLayout;
use memory::pool::AllocFromRequirementsFilter;
use memory::pool::MappingRequirement;
use memory::pool::MemoryPool;
use memory::pool::MemoryPoolAlloc;
use memory::pool::PotentialDedicatedAllocation;
use memory::pool::StdMemoryPool;
use sync::AccessError;
use sync::Sharing;
@@ -51,7 +54,7 @@ pub struct StorageImage<F, A = Arc<StdMemoryPool>>
view: UnsafeImageView,
// Memory used to back the image.
memory: A::Alloc,
memory: PotentialDedicatedAllocation<A::Alloc>,
// Dimensions of the image view.
dimensions: Dimensions,
@@ -126,25 +129,16 @@ impl<F> StorageImage<F> {
false)?
};
let mem_ty = {
let device_local = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0)
.filter(|t| t.is_device_local());
let any = device
.physical_device()
.memory_types()
.filter(|t| (mem_reqs.memory_type_bits & (1 << t.id())) != 0);
device_local.chain(any).next().unwrap()
};
let mem = MemoryPool::alloc(&Device::standard_pool(&device),
mem_ty,
mem_reqs.size,
mem_reqs.alignment,
let mem = MemoryPool::alloc_from_requirements(&Device::standard_pool(&device),
&mem_reqs,
AllocLayout::Optimal,
MappingRequirement::DoNotMap)?;
MappingRequirement::DoNotMap,
DedicatedAlloc::Image(&image),
|t| if t.is_device_local() {
AllocFromRequirementsFilter::Preferred
} else {
AllocFromRequirementsFilter::Allowed
})?;
debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
unsafe {
image.bind_memory(mem.memory(), mem.offset())?;

@@ -118,8 +118,8 @@ pub struct MemoryRequirements {
pub memory_type_bits: u32,
/// True if the implementation prefers to use dedicated allocations (in other words, allocate
/// a whole block of memory dedicated to this resource alone). If the implementation doesn't
/// support dedicated allocations, this will be false.
/// a whole block of memory dedicated to this resource alone). If the
/// `khr_get_memory_requirements2` extension isn't enabled, then this will be false.
///
/// > **Note**: As its name says, using a dedicated allocation is an optimization and not a
/// > requirement.
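This flag is consumed by the default `MemoryPool::alloc_from_requirements` further down: when it is false, or when the `khr_dedicated_allocation` extension isn't loaded, the allocation falls back to the generic pool path.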
@@ -138,10 +138,20 @@ impl MemoryRequirements {
}
}
/// Indicates whether we want to allocate memory for a specific resource, or in a generic way.
///
/// Using a dedicated allocation can yield better performance, but requires the
/// `VK_KHR_dedicated_allocation` extension to be enabled on the device.
///
/// If a dedicated allocation is performed, it must not be bound to any resource other than
/// the one that was passed with the enumeration.
#[derive(Debug, Copy, Clone)]
pub enum DedicatedAlloc<'a> {
/// Generic allocation.
None,
/// Allocation dedicated to a buffer.
Buffer(&'a UnsafeBuffer),
/// Allocation dedicated to an image.
Image(&'a UnsafeImage),
}
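As a reference, here is a sketch of how this enumeration is passed along, mirroring the `CpuAccessibleBuffer` call site above (it assumes `device`, `buffer` and `mem_reqs` are in scope, exactly as they are there):

let mem = MemoryPool::alloc_from_requirements(
    &Device::standard_pool(&device),
    &mem_reqs,
    AllocLayout::Linear,
    MappingRequirement::Map,
    // Offer `buffer` for a dedicated allocation; `DedicatedAlloc::None`
    // requests a generic, pool-backed allocation instead.
    DedicatedAlloc::Buffer(&buffer),
    |_| AllocFromRequirementsFilter::Allowed,
)?;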

@@ -7,9 +7,12 @@
// notice may not be copied, modified, or distributed except
// according to those terms.
use device::DeviceOwned;
use instance::MemoryType;
use memory::DedicatedAlloc;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
use memory::MemoryRequirements;
use memory::DeviceMemoryAllocError;
pub use self::host_visible::StdHostVisibleMemoryTypePool;
@@ -24,7 +27,7 @@ mod non_host_visible;
mod pool;
/// Pool of GPU-visible memory that can be allocated from.
pub unsafe trait MemoryPool {
pub unsafe trait MemoryPool: DeviceOwned {
/// Object that represents a single allocation. Its destructor should free the chunk.
type Alloc: MemoryPoolAlloc;
@@ -32,10 +35,15 @@ pub unsafe trait MemoryPool {
///
/// # Safety
///
/// Implementation safety:
///
/// - The returned object must match the requirements.
/// - When a linear object is allocated next to an optimal object, it is mandatory that
/// the boundary is aligned to the value of the `buffer_image_granularity` limit.
///
/// Note that it is not unsafe to *call* this function, but it is unsafe to bind the memory
/// returned by this function to a resource.
///
/// # Panic
///
/// - Panics if `memory_type` doesn't belong to the same physical device as the device which
@@ -44,8 +52,102 @@
/// - Panics if `size` is 0.
/// - Panics if `alignment` is 0.
///
fn alloc(&self, ty: MemoryType, size: usize, alignment: usize, layout: AllocLayout,
map: MappingRequirement) -> Result<Self::Alloc, DeviceMemoryAllocError>;
fn alloc_generic(&self, ty: MemoryType, size: usize, alignment: usize, layout: AllocLayout,
map: MappingRequirement) -> Result<Self::Alloc, DeviceMemoryAllocError>;
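// Implementation note on the granularity rule above: when a linear resource is
// placed next to an optimal one, its offset must be rounded up to the
// `buffer_image_granularity` device limit, for example (illustrative only):
//
//     let offset = ((offset + granularity - 1) / granularity) * granularity;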
/// Chooses a memory type and allocates memory from it.
///
/// Unlike `alloc_generic`, this function may allocate a whole new block of memory
/// dedicated to the given resource, depending on `requirements.prefer_dedicated`.
///
/// `filter` can be used to restrict the memory types and to indicate which are preferred.
/// If `map` is `MappingRequirement::Map`, then non-host-visible memory types will
/// automatically be filtered out.
///
/// # Safety
///
/// Implementation safety:
///
/// - The returned object must match the requirements.
/// - When a linear object is allocated next to an optimal object, it is mandatory that
/// the boundary is aligned to the value of the `buffer_image_granularity` limit.
/// - If `dedicated` is not `None`, the returned memory must either not be dedicated or be
/// dedicated to the resource that was passed.
///
/// Note that it is not unsafe to *call* this function, but it is unsafe to bind the memory
/// returned by this function to a resource.
///
/// # Panic
///
/// - Panics if no memory type could be found, which can happen if `filter` is too restrictive.
// TODO: ^ is this a good idea?
/// - Panics if `size` is 0.
/// - Panics if `alignment` is 0.
///
fn alloc_from_requirements<F>(&self, requirements: &MemoryRequirements, layout: AllocLayout,
map: MappingRequirement, dedicated: DedicatedAlloc, mut filter: F)
-> Result<PotentialDedicatedAllocation<Self::Alloc>, DeviceMemoryAllocError>
where F: FnMut(MemoryType) -> AllocFromRequirementsFilter
{
// Choose a suitable memory type.
let mem_ty = {
let mut filter = |ty: MemoryType| {
if map == MappingRequirement::Map && !ty.is_host_visible() {
return AllocFromRequirementsFilter::Forbidden;
}
filter(ty)
};
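// Scan all memory types twice: the first pass keeps only types that the
// filter marks as `Preferred`, the second falls back to `Allowed` types.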
let first_loop = self.device()
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Preferred));
let second_loop = self.device()
.physical_device()
.memory_types()
.map(|t| (t, AllocFromRequirementsFilter::Allowed));
first_loop
.chain(second_loop)
.filter(|&(t, _)| (requirements.memory_type_bits & (1 << t.id())) != 0)
.filter(|&(t, rq)| filter(t) == rq)
.next()
.expect("Couldn't find a memory type to allocate from").0
};
// Redirect to `self.alloc_generic` if we don't perform a dedicated allocation.
if !requirements.prefer_dedicated ||
!self.device().loaded_extensions().khr_dedicated_allocation
{
let alloc = self.alloc_generic(mem_ty, requirements.size, requirements.alignment,
layout, map)?;
return Ok(alloc.into());
}
if let DedicatedAlloc::None = dedicated {
let alloc = self.alloc_generic(mem_ty, requirements.size, requirements.alignment,
layout, map)?;
return Ok(alloc.into());
}
// If we reach here, then we perform a dedicated alloc.
match map {
MappingRequirement::Map => {
let mem = DeviceMemory::dedicated_alloc_and_map(self.device().clone(), mem_ty,
requirements.size, dedicated)?;
Ok(PotentialDedicatedAllocation::DedicatedMapped(mem))
},
MappingRequirement::DoNotMap => {
let mem = DeviceMemory::dedicated_alloc(self.device().clone(), mem_ty,
requirements.size, dedicated)?;
Ok(PotentialDedicatedAllocation::Dedicated(mem))
},
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum AllocFromRequirementsFilter {
/// This memory type is a preferred choice; it is picked in the first pass of the scan.
Preferred,
/// This memory type may be used, but only if no preferred type matched.
Allowed,
/// This memory type must not be used.
Forbidden,
}
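// Example filter, as used by the device-local buffer and image types above:
// prefer device-local memory, but accept any other compatible type.
//
//     |t: MemoryType| if t.is_device_local() {
//         AllocFromRequirementsFilter::Preferred
//     } else {
//         AllocFromRequirementsFilter::Allowed
//     }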
/// Object that represents a single allocation. Its destructor should free the chunk.
@@ -79,3 +181,50 @@ pub enum AllocLayout {
/// The object has an optimal layout.
Optimal,
}
/// Enumeration that can contain either a generic allocation coming from a pool, or a dedicated
/// allocation for one specific resource.
#[derive(Debug)]
pub enum PotentialDedicatedAllocation<A> {
/// Generic allocation obtained from a memory pool.
Generic(A),
/// Dedicated allocation that isn't mapped in host memory.
Dedicated(DeviceMemory),
/// Dedicated allocation that is mapped in host memory.
DedicatedMapped(MappedDeviceMemory),
}
unsafe impl<A> MemoryPoolAlloc for PotentialDedicatedAllocation<A>
where A: MemoryPoolAlloc
{
#[inline]
fn mapped_memory(&self) -> Option<&MappedDeviceMemory> {
match *self {
PotentialDedicatedAllocation::Generic(ref alloc) => alloc.mapped_memory(),
PotentialDedicatedAllocation::Dedicated(_) => None,
PotentialDedicatedAllocation::DedicatedMapped(ref mem) => Some(mem),
}
}
#[inline]
fn memory(&self) -> &DeviceMemory {
match *self {
PotentialDedicatedAllocation::Generic(ref alloc) => alloc.memory(),
PotentialDedicatedAllocation::Dedicated(ref mem) => mem,
PotentialDedicatedAllocation::DedicatedMapped(ref mem) => mem.as_ref(),
}
}
#[inline]
fn offset(&self) -> usize {
match *self {
PotentialDedicatedAllocation::Generic(ref alloc) => alloc.offset(),
PotentialDedicatedAllocation::Dedicated(_) => 0,
PotentialDedicatedAllocation::DedicatedMapped(_) => 0,
}
}
}
impl<A> From<A> for PotentialDedicatedAllocation<A> {
#[inline]
fn from(alloc: A) -> PotentialDedicatedAllocation<A> {
PotentialDedicatedAllocation::Generic(alloc)
}
}
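The blanket `From` impl is what lets the default `alloc_from_requirements` return a pool allocation with a plain `Ok(alloc.into())`; the dedicated variants are only ever constructed inside that method. Since a dedicated allocation owns its whole `DeviceMemory` block, `offset()` returns 0 for both dedicated variants.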

@@ -15,6 +15,7 @@ use std::sync::Arc;
use std::sync::Mutex;
use device::Device;
use device::DeviceOwned;
use instance::MemoryType;
use memory::DeviceMemory;
use memory::MappedDeviceMemory;
@@ -53,8 +54,9 @@ impl StdMemoryPool {
unsafe impl MemoryPool for Arc<StdMemoryPool> {
type Alloc = StdMemoryPoolAlloc;
fn alloc(&self, memory_type: MemoryType, size: usize, alignment: usize, layout: AllocLayout,
map: MappingRequirement) -> Result<StdMemoryPoolAlloc, DeviceMemoryAllocError> {
fn alloc_generic(&self, memory_type: MemoryType, size: usize, alignment: usize,
layout: AllocLayout, map: MappingRequirement)
-> Result<StdMemoryPoolAlloc, DeviceMemoryAllocError> {
let mut pools = self.pools.lock().unwrap();
let memory_type_host_visible = memory_type.is_host_visible();
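Only `alloc_generic` needs a concrete implementation here; the dedicated-allocation logic lives entirely in the trait's provided `alloc_from_requirements` method.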
@@ -109,6 +111,13 @@ unsafe impl MemoryPool for Arc<StdMemoryPool> {
}
}
unsafe impl DeviceOwned for StdMemoryPool {
#[inline]
fn device(&self) -> &Arc<Device> {
&self.device
}
}
#[derive(Debug)]
enum Pool {
HostVisible(Arc<StdHostVisibleMemoryTypePool>),