Invalidate and flush only if memory is not coherent

Pierre Krieger 2016-02-18 20:23:47 +01:00
parent 126e6f550d
commit 3b71ba6912


@@ -138,6 +138,7 @@ unsafe impl MemorySource for HostVisible {
         // fulfill any alignment requirement
         Ok(HostVisibleChunk {
             mem: mem,
+            coherent: mem_ty.is_host_coherent(),
             lock: Mutex::new((None, None)),
         })
     }
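
The `coherent` flag stored here comes from the memory type's property flags. As a standalone sketch (not vulkano's actual implementation), the check behind an `is_host_coherent()`-style helper boils down to testing VK_MEMORY_PROPERTY_HOST_COHERENT_BIT:

// Sketch only: the bit values are the real Vulkan constants, but the helper
// is reimplemented here for illustration and only mirrors the name used above.
const VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT: u32 = 0x0000_0002;
const VK_MEMORY_PROPERTY_HOST_COHERENT_BIT: u32 = 0x0000_0004;
const VK_MEMORY_PROPERTY_HOST_CACHED_BIT: u32 = 0x0000_0008;

fn is_host_coherent(property_flags: u32) -> bool {
    property_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT != 0
}

fn main() {
    // HOST_VISIBLE | HOST_COHERENT: the driver keeps host and device views
    // of a mapping in sync, so no explicit invalidate/flush is needed.
    assert!(is_host_coherent(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
                             | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT));
    // HOST_VISIBLE | HOST_CACHED only: the CPU cache must be invalidated
    // before reads and flushed after writes, which is what this commit gates.
    assert!(!is_host_coherent(VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
                              | VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
}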
@@ -146,6 +147,7 @@ unsafe impl MemorySource for HostVisible {
 /// A chunk allocated from a `HostVisible`.
 pub struct HostVisibleChunk {
     mem: MappedDeviceMemory,
+    coherent: bool,
     lock: Mutex<(Option<Arc<Semaphore>>, Option<Arc<Fence>>)>,
 }
@@ -213,23 +215,25 @@ unsafe impl<'a, T: 'a> CpuWriteAccessible<'a, T> for HostVisibleChunk {
         }
         lock.1 = None;
 
-        // TODO: only invalidate if necessary
-        let range = vk::MappedMemoryRange {
-            sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
-            pNext: ptr::null(),
-            memory: self.mem.memory().internal_object(),
-            offset: 0,
-            size: vk::WHOLE_SIZE,
-        };
+        if !self.coherent {
+            let range = vk::MappedMemoryRange {
+                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+                pNext: ptr::null(),
+                memory: self.mem.memory().internal_object(),
+                offset: 0,
+                size: vk::WHOLE_SIZE,
+            };
 
-        // TODO: check result?
-        unsafe {
-            vk.InvalidateMappedMemoryRanges(self.mem.memory().device().internal_object(),
-                                            1, &range);
+            // TODO: check result?
+            unsafe {
+                vk.InvalidateMappedMemoryRanges(self.mem.memory().device().internal_object(),
+                                                1, &range);
+            }
         }
 
         GpuAccess {
             mem: &self.mem,
+            coherent: self.coherent,
             guard: lock,
             pointer: pointer,
         }
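
The pattern this hunk introduces is invalidate-before-CPU-read: after a GPU write, a non-coherent mapping may hold stale data in the CPU cache, so it must be invalidated before the host reads through it, while coherent memory can skip the call entirely. A minimal sketch of that control flow, with invented names standing in for the real entry points:

// Hypothetical stand-ins: `prepare_cpu_read` mirrors the gating logic added
// above, and `invalidate_whole_range` represents the
// vkInvalidateMappedMemoryRanges(offset = 0, size = VK_WHOLE_SIZE) call.
struct Mapping {
    coherent: bool,
}

impl Mapping {
    fn invalidate_whole_range(&self) {
        println!("vkInvalidateMappedMemoryRanges over VK_WHOLE_SIZE");
    }

    /// Run right before handing the CPU a view of GPU-written data.
    fn prepare_cpu_read(&self) {
        if !self.coherent {
            self.invalidate_whole_range();
        }
    }
}

fn main() {
    Mapping { coherent: true }.prepare_cpu_read();  // no-op on coherent memory
    Mapping { coherent: false }.prepare_cpu_read(); // performs the invalidate
}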
@@ -254,23 +258,25 @@ unsafe impl<'a, T: 'a> CpuWriteAccessible<'a, T> for HostVisibleChunk {
         lock.1 = None;
 
-        // TODO: only invalidate if necessary
-        let range = vk::MappedMemoryRange {
-            sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
-            pNext: ptr::null(),
-            memory: self.mem.memory().internal_object(),
-            offset: 0,
-            size: vk::WHOLE_SIZE,
-        };
+        if !self.coherent {
+            let range = vk::MappedMemoryRange {
+                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+                pNext: ptr::null(),
+                memory: self.mem.memory().internal_object(),
+                offset: 0,
+                size: vk::WHOLE_SIZE,
+            };
 
-        // TODO: check result?
-        unsafe {
-            vk.InvalidateMappedMemoryRanges(self.mem.memory().device().internal_object(),
-                                            1, &range);
+            // TODO: check result?
+            unsafe {
+                vk.InvalidateMappedMemoryRanges(self.mem.memory().device().internal_object(),
+                                                1, &range);
+            }
         }
 
         Some(GpuAccess {
             mem: &self.mem,
+            coherent: self.coherent,
             guard: lock,
             pointer: pointer,
         })
@@ -283,8 +289,9 @@ unsafe impl<'a, T: 'a> CpuWriteAccessible<'a, T> for HostVisibleChunk {
 /// this memory's content or tries to submit a GPU command that uses this memory, it will block.
 pub struct GpuAccess<'a, T: ?Sized + 'a> {
     mem: &'a MappedDeviceMemory,
-    guard: MutexGuard<'a, (Option<Arc<Semaphore>>, Option<Arc<Fence>>)>,
     pointer: *mut T,
+    guard: MutexGuard<'a, (Option<Arc<Semaphore>>, Option<Arc<Fence>>)>,
+    coherent: bool,
 }
 
 impl<'a, T: ?Sized + 'a> Deref for GpuAccess<'a, T> {
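
One side note on the field reorder in this hunk: Rust drops struct fields in declaration order, so the position of RAII fields such as the `MutexGuard` can be observable. The commit does not say whether that motivated the move; the standalone snippet below just demonstrates the language rule, with invented names:

// Demonstrates field drop order only; `Noisy` and `Holder` are illustrative.
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

struct Holder {
    first: Noisy,
    second: Noisy,
}

fn main() {
    let _h = Holder {
        first: Noisy("first"),
        second: Noisy("second"),
    };
    // Prints "dropping first" then "dropping second": declaration order.
}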
@@ -306,21 +313,22 @@ impl<'a, T: ?Sized + 'a> DerefMut for GpuAccess<'a, T> {
 impl<'a, T: ?Sized + 'a> Drop for GpuAccess<'a, T> {
     #[inline]
     fn drop(&mut self) {
-        // TODO: only flush if necessary
-
-        let vk = self.mem.memory().device().pointers();
-
-        let range = vk::MappedMemoryRange {
-            sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
-            pNext: ptr::null(),
-            memory: self.mem.memory().internal_object(),
-            offset: 0,
-            size: vk::WHOLE_SIZE,
-        };
-
-        // TODO: check result?
-        unsafe {
-            vk.FlushMappedMemoryRanges(self.mem.memory().device().internal_object(), 1, &range);
+        if !self.coherent {
+            let vk = self.mem.memory().device().pointers();
+            let range = vk::MappedMemoryRange {
+                sType: vk::STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
+                pNext: ptr::null(),
+                memory: self.mem.memory().internal_object(),
+                offset: 0,
+                size: vk::WHOLE_SIZE,
+            };
+
+            // TODO: check result?
+            unsafe {
+                vk.FlushMappedMemoryRanges(self.mem.memory().device().internal_object(),
+                                           1, &range);
+            }
         }
     }
 }
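
The mirror-image pattern in `Drop` is flush-after-CPU-write: stores to a non-coherent mapping may sit in the CPU cache, so they must be flushed before the GPU reads them, and doing it in `Drop` ties the flush to the write guard's lifetime. A hedged sketch of that RAII shape, with invented names rather than vulkano's API:

// `WriteGuard` is illustrative; the println! stands in for
// vkFlushMappedMemoryRanges(offset = 0, size = VK_WHOLE_SIZE).
struct WriteGuard<'a> {
    data: &'a mut [u8],
    coherent: bool,
}

impl<'a> Drop for WriteGuard<'a> {
    fn drop(&mut self) {
        if !self.coherent {
            println!("vkFlushMappedMemoryRanges over VK_WHOLE_SIZE");
        }
    }
}

fn main() {
    let mut backing = [0u8; 4];
    {
        let mut guard = WriteGuard { data: &mut backing, coherent: false };
        guard.data[0] = 42; // CPU write through the mapped slice
    } // guard drops here; the flush fires because the memory is non-coherent
    assert_eq!(backing[0], 42);
}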