Simplify BufferAccess and ImageAccess (#811)

tomaka 2017-09-09 09:11:58 +02:00 committed by GitHub
parent bdf026a78b
commit 9abc9e431d
14 changed files with 243 additions and 216 deletions

View File

@ -1,5 +1,7 @@
# Unreleased (major)
- Changed `BufferAccess::conflict_*` and `ImageAccess::conflict_*` so that they can no longer be
queried for a specific range of the resource; conflicts are always checked against the whole
buffer or image.
- Changed `CpuBufferPool::next()` and `chunk()` to return a `Result` in case of an error when
allocating or mapping memory.
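
As an illustration of the new call shape (a minimal sketch, not code from this commit; `buffers_may_overlap` and `pool` are hypothetical), conflict checks now take only the other resource, and pool allocation is fallible:

use vulkano::buffer::BufferAccess;

// With the range parameters gone, the whole buffers are compared.
fn buffers_may_overlap(a: &BufferAccess, b: &BufferAccess) -> bool {
    // `conflict_key()` can collide for unrelated resources, so
    // `conflicts_buffer()` remains the authoritative check.
    a.conflicts_buffer(b)
}

// `CpuBufferPool::next()` / `chunk()` now return a `Result`, so allocation or
// mapping failures must be handled, e.g. `let sub = pool.next(data)?;`.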

View File

@ -42,6 +42,7 @@ use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use instance::QueueFamily;
use memory::Content;
use memory::CpuAccess as MemCpuAccess;
@ -317,7 +318,17 @@ unsafe impl<T: ?Sized, A> BufferAccess for CpuAccessibleBuffer<T, A>
}
#[inline]
fn conflict_key(&self, _: usize, _: usize) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> u64 {
self.inner.key()
}

View File

@ -28,6 +28,7 @@ use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
use memory::pool::AllocFromRequirementsFilter;
@ -609,7 +610,17 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolChunk<T, A>
}
#[inline]
fn conflict_key(&self, _: usize, _: usize) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> u64 {
self.buffer.inner.key() + self.index as u64
}
@ -730,8 +741,18 @@ unsafe impl<T, A> BufferAccess for CpuBufferPoolSubbuffer<T, A>
}
#[inline]
fn conflict_key(&self, a: usize, b: usize) -> u64 {
self.chunk.conflict_key(a, b)
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> u64 {
self.chunk.conflict_key()
}
#[inline]

View File

@ -29,6 +29,7 @@ use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
@ -195,7 +196,17 @@ unsafe impl<T: ?Sized, A> BufferAccess for DeviceLocalBuffer<T, A>
}
#[inline]
fn conflict_key(&self, _: usize, _: usize) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> u64 {
self.inner.key()
}

View File

@ -40,6 +40,7 @@ use command_buffer::CommandBufferExecFuture;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use instance::QueueFamily;
use memory::DedicatedAlloc;
use memory::DeviceMemoryAllocError;
@ -327,7 +328,17 @@ unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A> {
}
#[inline]
fn conflict_key(&self, _: usize, _: usize) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> u64 {
self.inner.key()
}
@ -383,7 +394,17 @@ unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBufferInitialization<T, A> {
}
#[inline]
fn conflict_key(&self, _: usize, _: usize) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
false
}
#[inline]
fn conflict_key(&self) -> u64 {
self.buffer.inner.key()
}

View File

@ -6,18 +6,19 @@
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::marker::PhantomData;
use std::mem;
use std::ops::Range;
use std::sync::Arc;
use buffer::traits::BufferAccess;
use buffer::traits::BufferInner;
use buffer::traits::TypedBufferAccess;
use device::Device;
use device::DeviceOwned;
use device::Queue;
use image::ImageAccess;
use sync::AccessError;
/// A subpart of a buffer.
@ -194,22 +195,18 @@ unsafe impl<T: ?Sized, B> BufferAccess for BufferSlice<T, B>
}
#[inline]
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
let self_offset = self.offset + self_offset;
// FIXME: spurious failures ; needs investigation
//debug_assert!(self_size + self_offset <= self.size);
self.resource
.conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.resource.conflicts_buffer(other)
}
#[inline]
fn conflict_key(&self, self_offset: usize, self_size: usize) -> u64 {
let self_offset = self.offset + self_offset;
// FIXME: spurious failures ; needs investigation
//debug_assert!(self_size + self_offset <= self.size);
self.resource.conflict_key(self_offset, self_size)
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.resource.conflicts_image(other)
}
#[inline]
fn conflict_key(&self) -> u64 {
self.resource.conflict_key()
}
#[inline]
@ -258,9 +255,9 @@ impl<T, B> From<BufferSlice<T, B>> for BufferSlice<[T], B> {
/// Takes a `BufferSlice` that points to a struct, and returns a `BufferSlice` that points to
/// a specific field of that struct.
#[macro_export]
macro_rules! buffer_slice_field {
($slice:expr, $field:ident) => (
// TODO: add #[allow(unsafe_code)] when that's allowed
unsafe { $slice.slice_custom(|s| &s.$field) }
)
}

View File

@ -18,7 +18,6 @@ use memory::Content;
use sync::AccessError;
use SafeDeref;
use VulkanObject;
/// Trait for objects that represent a way for the GPU to have access to a buffer or a slice of a
/// buffer.
@ -88,52 +87,30 @@ pub unsafe trait BufferAccess: DeviceOwned {
self.slice(index .. (index + 1))
}
/// Returns true if an access to `self` (as defined by `self_offset` and `self_size`)
/// potentially overlaps the same memory as an access to `other` (as defined by `other_offset`
/// and `other_size`).
/// Returns true if an access to `self` potentially overlaps the same memory as an access to
/// `other`.
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
/// If this function returns `false`, this means that we are allowed to mutably access the
/// content of `self` at the same time as the content of `other` without causing a data
/// race.
///
/// Note that the function must be transitive. In other words if `conflicts(a, b)` is true and
/// `conflicts(b, c)` is true, then `conflicts(a, c)` must be true as well.
fn conflicts_buffer(&self, _self_offset: usize, self_size: usize, other: &BufferAccess,
_other_offset: usize, _other_size: usize)
-> bool {
// TODO: should we really provide a default implementation?
debug_assert!(self_size <= self.size());
if self.inner().buffer.internal_object() != other.inner().buffer.internal_object() {
return false;
}
true
}
fn conflicts_buffer(&self, other: &BufferAccess) -> bool;
/// Returns true if an access to `self` (as defined by `self_offset` and `self_size`)
/// potentially overlaps the same memory as an access to `other` (as defined by
/// `other_first_layer`, `other_num_layers`, `other_first_mipmap` and `other_num_mipmaps`).
/// Returns true if an access to `self` potentially overlaps the same memory as an access to
/// `other`.
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
/// If this function returns `false`, this means that we are allowed to mutably access the
/// content of `self` at the same time as the content of `other` without causing a data
/// race.
///
/// Note that the function must be transitive. In other words if `conflicts(a, b)` is true and
/// `conflicts(b, c)` is true, then `conflicts(a, c)` must be true as well.
fn conflicts_image(&self, self_offset: usize, self_size: usize, other: &ImageAccess,
other_first_layer: u32, other_num_layers: u32, other_first_mipmap: u32,
other_num_mipmaps: u32)
-> bool {
let other_key = other.conflict_key(other_first_layer,
other_num_layers,
other_first_mipmap,
other_num_mipmaps);
self.conflict_key(self_offset, self_size) == other_key
}
fn conflicts_image(&self, other: &ImageAccess) -> bool;
/// Returns a key that uniquely identifies the range given by offset/size.
///
/// Two ranges that potentially overlap in memory should return the same key.
/// Returns a key that uniquely identifies the buffer. Two buffers or images that potentially
/// overlap in memory must return the same key.
///
/// The key is shared amongst all buffers and images, which means that you can make several
/// different buffer objects share the same memory, or make some buffer objects share memory
@ -142,34 +119,7 @@ pub unsafe trait BufferAccess: DeviceOwned {
/// Since it is possible to accidentally return the same key for memory ranges that don't
/// overlap, the `conflicts_buffer` or `conflicts_image` function should always be called to
/// verify whether they actually overlap.
fn conflict_key(&self, _self_offset: usize, _self_size: usize) -> u64 {
// FIXME: remove implementation
unimplemented!()
}
/// Shortcut for `conflicts_buffer` that compares the whole buffer to another.
#[inline]
fn conflicts_buffer_all(&self, other: &BufferAccess) -> bool {
self.conflicts_buffer(0, self.size(), other, 0, other.size())
}
/// Shortcut for `conflicts_image` that compares the whole buffer to a whole image.
#[inline]
fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
self.conflicts_image(0,
self.size(),
other,
0,
other.dimensions().array_layers(),
0,
other.mipmap_levels())
}
/// Shortcut for `conflict_key` that grabs the key of the whole buffer.
#[inline]
fn conflict_key_all(&self) -> u64 {
self.conflict_key(0, self.size())
}
fn conflict_key(&self) -> u64;
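
As a guide for implementers, here is a minimal sketch of how a simple buffer type satisfies the three methods after this change (mirroring the concrete impls above; `MyBuffer` and `self.inner.key()` are hypothetical placeholders for a type wrapping a single allocation):

unsafe impl BufferAccess for MyBuffer {
    #[inline]
    fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
        // Same underlying allocation implies a potential overlap.
        self.conflict_key() == other.conflict_key()
    }

    #[inline]
    fn conflicts_image(&self, _other: &ImageAccess) -> bool {
        // This buffer's memory is never bound to an image.
        false
    }

    #[inline]
    fn conflict_key(&self) -> u64 {
        // One key per underlying allocation, shared by every view of it.
        self.inner.key()
    }

    // `inner()`, `size()` and the locking methods are omitted here.
}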
/// Locks the resource for usage on the GPU. Returns an error if the lock can't be acquired.
///
@ -224,15 +174,18 @@ unsafe impl<T> BufferAccess for T
}
#[inline]
fn conflicts_buffer(&self, self_offset: usize, self_size: usize, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
(**self).conflicts_buffer(self_offset, self_size, other, other_offset, other_size)
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
(**self).conflicts_buffer(other)
}
#[inline]
fn conflict_key(&self, self_offset: usize, self_size: usize) -> u64 {
(**self).conflict_key(self_offset, self_size)
fn conflicts_image(&self, other: &ImageAccess) -> bool {
(**self).conflicts_image(other)
}
#[inline]
fn conflict_key(&self) -> u64 {
(**self).conflict_key()
}
#[inline]

View File

@ -258,31 +258,31 @@ impl<P> BuilderKey<P> {
}
#[inline]
fn conflicts_buffer_all(&self, commands_lock: &Commands<P>, buf: &BufferAccess) -> bool {
fn conflicts_buffer(&self, commands_lock: &Commands<P>, buf: &BufferAccess) -> bool {
// TODO: put the conflicts_* methods directly on the Command trait to avoid an indirect call?
match self.resource_ty {
KeyTy::Buffer => {
let c = &commands_lock.commands[self.command_id];
c.buffer(self.resource_index).conflicts_buffer_all(buf)
c.buffer(self.resource_index).conflicts_buffer(buf)
},
KeyTy::Image => {
let c = &commands_lock.commands[self.command_id];
c.image(self.resource_index).conflicts_buffer_all(buf)
c.image(self.resource_index).conflicts_buffer(buf)
},
}
}
#[inline]
fn conflicts_image_all(&self, commands_lock: &Commands<P>, img: &ImageAccess) -> bool {
fn conflicts_image(&self, commands_lock: &Commands<P>, img: &ImageAccess) -> bool {
// TODO: put the conflicts_* methods directly on the Command trait to avoid an indirect call?
match self.resource_ty {
KeyTy::Buffer => {
let c = &commands_lock.commands[self.command_id];
c.buffer(self.resource_index).conflicts_image_all(img)
c.buffer(self.resource_index).conflicts_image(img)
},
KeyTy::Image => {
let c = &commands_lock.commands[self.command_id];
c.image(self.resource_index).conflicts_image_all(img)
c.image(self.resource_index).conflicts_image(img)
},
}
}
@ -297,11 +297,11 @@ impl<P> PartialEq for BuilderKey<P> {
match other.resource_ty {
KeyTy::Buffer => {
let c = &commands_lock.commands[other.command_id];
self.conflicts_buffer_all(&commands_lock, c.buffer(other.resource_index))
self.conflicts_buffer(&commands_lock, c.buffer(other.resource_index))
},
KeyTy::Image => {
let c = &commands_lock.commands[other.command_id];
self.conflicts_image_all(&commands_lock, c.image(other.resource_index))
self.conflicts_image(&commands_lock, c.image(other.resource_index))
},
}
}
@ -318,11 +318,11 @@ impl<P> Hash for BuilderKey<P> {
match self.resource_ty {
KeyTy::Buffer => {
let c = &commands_lock.commands[self.command_id];
c.buffer(self.resource_index).conflict_key_all()
c.buffer(self.resource_index).conflict_key()
},
KeyTy::Image => {
let c = &commands_lock.commands[self.command_id];
c.image(self.resource_index).conflict_key_all()
c.image(self.resource_index).conflict_key()
},
}.hash(state)
}
@ -867,9 +867,9 @@ unsafe impl<'a> Sync for CbKey<'a> {
impl<'a> CbKey<'a> {
#[inline]
fn conflicts_buffer_all(&self, commands_lock: Option<&Vec<Box<FinalCommand + Send + Sync>>>,
buf: &BufferAccess)
-> bool {
fn conflicts_buffer(&self, commands_lock: Option<&Vec<Box<FinalCommand + Send + Sync>>>,
buf: &BufferAccess)
-> bool {
match *self {
CbKey::Command {
ref commands,
@ -888,24 +888,24 @@ impl<'a> CbKey<'a> {
match resource_ty {
KeyTy::Buffer => {
let c = &commands_lock[command_id];
c.buffer(resource_index).conflicts_buffer_all(buf)
c.buffer(resource_index).conflicts_buffer(buf)
},
KeyTy::Image => {
let c = &commands_lock[command_id];
c.image(resource_index).conflicts_buffer_all(buf)
c.image(resource_index).conflicts_buffer(buf)
},
}
},
CbKey::BufferRef(b) => b.conflicts_buffer_all(buf),
CbKey::ImageRef(i) => i.conflicts_buffer_all(buf),
CbKey::BufferRef(b) => b.conflicts_buffer(buf),
CbKey::ImageRef(i) => i.conflicts_buffer(buf),
}
}
#[inline]
fn conflicts_image_all(&self, commands_lock: Option<&Vec<Box<FinalCommand + Send + Sync>>>,
img: &ImageAccess)
-> bool {
fn conflicts_image(&self, commands_lock: Option<&Vec<Box<FinalCommand + Send + Sync>>>,
img: &ImageAccess)
-> bool {
match *self {
CbKey::Command {
ref commands,
@ -924,17 +924,17 @@ impl<'a> CbKey<'a> {
match resource_ty {
KeyTy::Buffer => {
let c = &commands_lock[command_id];
c.buffer(resource_index).conflicts_image_all(img)
c.buffer(resource_index).conflicts_image(img)
},
KeyTy::Image => {
let c = &commands_lock[command_id];
c.image(resource_index).conflicts_image_all(img)
c.image(resource_index).conflicts_image(img)
},
}
},
CbKey::BufferRef(b) => b.conflicts_image_all(img),
CbKey::ImageRef(i) => i.conflicts_image_all(img),
CbKey::BufferRef(b) => b.conflicts_image(img),
CbKey::ImageRef(i) => i.conflicts_image(img),
}
}
}
@ -944,10 +944,10 @@ impl<'a> PartialEq for CbKey<'a> {
fn eq(&self, other: &CbKey) -> bool {
match *self {
CbKey::BufferRef(a) => {
other.conflicts_buffer_all(None, a)
other.conflicts_buffer(None, a)
},
CbKey::ImageRef(a) => {
other.conflicts_image_all(None, a)
other.conflicts_image(None, a)
},
CbKey::Command {
ref commands,
@ -960,11 +960,11 @@ impl<'a> PartialEq for CbKey<'a> {
match resource_ty {
KeyTy::Buffer => {
let c = &commands_lock[command_id];
other.conflicts_buffer_all(Some(&commands_lock), c.buffer(resource_index))
other.conflicts_buffer(Some(&commands_lock), c.buffer(resource_index))
},
KeyTy::Image => {
let c = &commands_lock[command_id];
other.conflicts_image_all(Some(&commands_lock), c.image(resource_index))
other.conflicts_image(Some(&commands_lock), c.image(resource_index))
},
}
},
@ -990,17 +990,17 @@ impl<'a> Hash for CbKey<'a> {
match resource_ty {
KeyTy::Buffer => {
let c = &commands_lock[command_id];
c.buffer(resource_index).conflict_key_all().hash(state)
c.buffer(resource_index).conflict_key().hash(state)
},
KeyTy::Image => {
let c = &commands_lock[command_id];
c.image(resource_index).conflict_key_all().hash(state)
c.image(resource_index).conflict_key().hash(state)
},
}
},
CbKey::BufferRef(buf) => buf.conflict_key_all().hash(state),
CbKey::ImageRef(img) => img.conflict_key_all().hash(state),
CbKey::BufferRef(buf) => buf.conflict_key().hash(state),
CbKey::ImageRef(img) => img.conflict_key().hash(state),
}
}
}

View File

@ -43,10 +43,10 @@ pub fn check_copy_buffer<S, D, T>(device: &Device, source: &S, destination: &D)
let copy_size = cmp::min(source.size(), destination.size());
if source.conflicts_buffer(0, copy_size, &destination, 0, copy_size) {
if source.conflicts_buffer(&destination) {
return Err(CheckCopyBufferError::OverlappingRanges);
} else {
debug_assert!(!destination.conflicts_buffer(0, copy_size, &source, 0, copy_size));
debug_assert!(!destination.conflicts_buffer(&source));
}
Ok(CheckCopyBuffer { copy_size })

View File

@ -12,6 +12,7 @@ use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use buffer::BufferAccess;
use device::Device;
use device::Queue;
use format::ClearValue;
@ -432,7 +433,17 @@ unsafe impl<F, A> ImageAccess for AttachmentImage<F, A>
}
#[inline]
fn conflict_key(&self, _: u32, _: u32, _: u32, _: u32) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
false
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.conflict_key() == other.conflict_key()
}
#[inline]
fn conflict_key(&self) -> u64 {
self.image.key()
}

View File

@ -293,7 +293,17 @@ unsafe impl<F, A> ImageAccess for ImmutableImage<F, A>
}
#[inline]
fn conflict_key(&self, _: u32, _: u32, _: u32, _: u32) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
false
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflict_key(&self) -> u64 {
self.image.key()
}
@ -391,7 +401,17 @@ unsafe impl<F, A> ImageAccess for ImmutableImageInitialization<F, A>
}
#[inline]
fn conflict_key(&self, _: u32, _: u32, _: u32, _: u32) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
false
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflict_key(&self) -> u64 {
self.image.image.key()
}

View File

@ -12,6 +12,7 @@ use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use buffer::BufferAccess;
use device::Device;
use device::Queue;
use format::ClearValue;
@ -198,7 +199,17 @@ unsafe impl<F, A> ImageAccess for StorageImage<F, A>
}
#[inline]
fn conflict_key(&self, _: u32, _: u32, _: u32, _: u32) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
false
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.conflict_key() == other.conflict_key() // TODO:
}
#[inline]
fn conflict_key(&self) -> u64 {
self.image.key()
}

View File

@ -9,6 +9,7 @@
use std::sync::Arc;
use buffer::BufferAccess;
use device::Queue;
use format::ClearValue;
use format::Format;
@ -101,7 +102,17 @@ unsafe impl ImageAccess for SwapchainImage {
}
#[inline]
fn conflict_key(&self, _: u32, _: u32, _: u32, _: u32) -> u64 {
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
false
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.my_image().image.key() == other.conflict_key() // TODO:
}
#[inline]
fn conflict_key(&self) -> u64 {
self.my_image().image.key()
}

View File

@ -26,7 +26,6 @@ use sampler::Sampler;
use sync::AccessError;
use SafeDeref;
use VulkanObject;
/// Trait for types that represent the way a GPU can access an image.
pub unsafe trait ImageAccess {
@ -127,53 +126,28 @@ pub unsafe trait ImageAccess {
}
}
/// Returns true if an access to `self` (as defined by `self_first_layer`, `self_num_layers`,
/// `self_first_mipmap` and `self_num_mipmaps`) potentially overlaps the same memory as an
/// access to `other` (as defined by `other_offset` and `other_size`).
/// Returns true if an access to `self` potentially overlaps the same memory as an
/// access to `other`.
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
/// If this function returns `false`, this means that we are allowed to access the content
/// of `self` at the same time as the content of `other` without causing a data race.
///
/// Note that the function must be transitive. In other words if `conflicts(a, b)` is true and
/// `conflicts(b, c)` is true, then `conflicts(a, c)` must be true as well.
fn conflicts_buffer(&self, self_first_layer: u32, self_num_layers: u32,
self_first_mipmap: u32, self_num_mipmaps: u32, other: &BufferAccess,
other_offset: usize, other_size: usize)
-> bool {
// TODO: should we really provide a default implementation?
false
}
fn conflicts_buffer(&self, other: &BufferAccess) -> bool;
/// Returns true if an access to `self` (as defined by `self_first_layer`, `self_num_layers`,
/// `self_first_mipmap` and `self_num_mipmaps`) potentially overlaps the same memory as an
/// access to `other` (as defined by `other_first_layer`, `other_num_layers`,
/// `other_first_mipmap` and `other_num_mipmaps`).
/// Returns true if an access to `self` potentially overlaps the same memory as an
/// access to `other`.
///
/// If this function returns `false`, this means that we are allowed to access the offset/size
/// of `self` at the same time as the offset/size of `other` without causing a data race.
/// If this function returns `false`, this means that we are allowed to access the content
/// of `self` at the same time as the content of `other` without causing a data race.
///
/// Note that the function must be transitive. In other words if `conflicts(a, b)` is true and
/// `conflicts(b, c)` is true, then `conflicts(a, c)` must be true as well.
fn conflicts_image(&self, self_first_layer: u32, self_num_layers: u32,
self_first_mipmap: u32, self_num_mipmaps: u32, other: &ImageAccess,
other_first_layer: u32, other_num_layers: u32, other_first_mipmap: u32,
other_num_mipmaps: u32)
-> bool {
// TODO: should we really provide a default implementation?
// TODO: debug asserts to check for ranges
if self.inner().image.internal_object() != other.inner().image.internal_object() {
return false;
}
true
}
fn conflicts_image(&self, other: &ImageAccess) -> bool;
/// Returns a key that uniquely identifies the range given by
/// first_layer/num_layers/first_mipmap/num_mipmaps.
///
/// Two ranges that potentially overlap in memory should return the same key.
/// Returns a key that uniquely identifies the memory content of the image.
/// Two ranges that potentially overlap in memory must return the same key.
///
/// The key is shared amongst all buffers and images, which means that you can make several
/// different image objects share the same memory, or make some image objects share memory
@ -182,40 +156,7 @@ pub unsafe trait ImageAccess {
/// Since it is possible to accidentally return the same key for memory ranges that don't
/// overlap, the `conflicts_image` or `conflicts_buffer` function should always be called to
/// verify whether they actually overlap.
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64;
/// Shortcut for `conflicts_buffer` that compares the whole buffer to another.
#[inline]
fn conflicts_buffer_all(&self, other: &BufferAccess) -> bool {
self.conflicts_buffer(0,
self.dimensions().array_layers(),
0,
self.mipmap_levels(),
other,
0,
other.size())
}
/// Shortcut for `conflicts_image` that compares the whole buffer to a whole image.
#[inline]
fn conflicts_image_all(&self, other: &ImageAccess) -> bool {
self.conflicts_image(0,
self.dimensions().array_layers(),
0,
self.mipmap_levels(),
other,
0,
other.dimensions().array_layers(),
0,
other.mipmap_levels())
}
/// Shortcut for `conflict_key` that grabs the key of the whole buffer.
#[inline]
fn conflict_key_all(&self) -> u64 {
self.conflict_key(0, self.dimensions().array_layers(), 0, self.mipmap_levels())
}
fn conflict_key(&self) -> u64;
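
Correspondingly, a minimal sketch of the image-side methods (mirroring the `StorageImage`/`AttachmentImage` impls above; `MyImage` and `self.image.key()` are hypothetical):

unsafe impl ImageAccess for MyImage {
    #[inline]
    fn conflicts_buffer(&self, _other: &BufferAccess) -> bool {
        // This image's memory is never bound to a buffer.
        false
    }

    #[inline]
    fn conflicts_image(&self, other: &ImageAccess) -> bool {
        // Whole images are compared; a shared key means a potential overlap.
        self.conflict_key() == other.conflict_key()
    }

    #[inline]
    fn conflict_key(&self) -> u64 {
        self.image.key()
    }

    // `inner()`, the layout methods and the locking methods are omitted here.
}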
/// Locks the resource for usage on the GPU. Returns an error if the lock can't be acquired.
///
@ -284,9 +225,18 @@ unsafe impl<T> ImageAccess for T
}
#[inline]
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64 {
(**self).conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
(**self).conflicts_buffer(other)
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
(**self).conflicts_image(other)
}
#[inline]
fn conflict_key(&self) -> u64 {
(**self).conflict_key()
}
#[inline]
@ -336,10 +286,18 @@ unsafe impl<I> ImageAccess for ImageAccessFromUndefinedLayout<I>
}
#[inline]
fn conflict_key(&self, first_layer: u32, num_layers: u32, first_mipmap: u32, num_mipmaps: u32)
-> u64 {
self.image
.conflict_key(first_layer, num_layers, first_mipmap, num_mipmaps)
fn conflicts_buffer(&self, other: &BufferAccess) -> bool {
self.image.conflicts_buffer(other)
}
#[inline]
fn conflicts_image(&self, other: &ImageAccess) -> bool {
self.image.conflicts_image(other)
}
#[inline]
fn conflict_key(&self) -> u64 {
self.image.conflict_key()
}
#[inline]