81: Buffer tracking and unmapping r=kvark a=swiftcoder

Adds preliminary transitioning of buffers to the mapped state.
Adds buffer unmapping to the cube sample.
Modifies wgpu_queue_submit so that it no longer holds a write lock on the device while callbacks run (this could definitely be cleaner, but I'm not sure which direction to take the refactoring here).
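For context, the locking pattern this lands on is roughly the sketch below. It uses a plain std::sync::RwLock and made-up Device/callback types rather than the real HUB internals, so treat it as an illustration of the shape only: the write borrow is confined to one scope, and only a read guard is held while user callbacks run.

use std::sync::RwLock;

// Illustrative stand-ins only -- not the real HUB/device types.
struct Device {
    submit_index: u64,
}

type SubmitCallback = Box<dyn Fn(u64)>;

fn queue_submit(device_lock: &RwLock<Device>, callbacks: &[SubmitCallback]) {
    // Phase 1: everything that mutates the device happens inside this scope,
    // so the write guard is dropped before any user code runs.
    let old_submit_index = {
        let mut device = device_lock.write().unwrap();
        device.submit_index += 1;
        device.submit_index
    };

    // Phase 2: no need for write access to the device from here on out --
    // re-acquire only a read guard and run the callbacks while holding it.
    let device = device_lock.read().unwrap();
    debug_assert_eq!(device.submit_index, old_submit_index);
    for callback in callbacks {
        callback(old_submit_index);
    }
}

fn main() {
    let device_lock = RwLock::new(Device { submit_index: 0 });
    let callbacks: Vec<SubmitCallback> =
        vec![Box::new(|index| println!("callback ran after submit {}", index))];
    queue_submit(&device_lock, &callbacks);
}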

Co-authored-by: Tristam MacDonald <tristam@trist.am>
bors[bot] 2019-02-28 14:18:16 +00:00
commit 193eec694e
4 changed files with 156 additions and 129 deletions


@@ -102,10 +102,8 @@ fn main() {
println!("Times: {:?}", results);
}
staging_buffer.unmap();
});
device.get_queue().submit(&[encoder.finish()]);
// TODO: why does calling unmap() inside the callback prevent the program from exiting?
staging_buffer.unmap();
}


@@ -127,6 +127,8 @@ impl framework::Example for Example {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
unsafe { std::ptr::copy_nonoverlapping(vertex_data.as_ptr() as *const u8, data.as_mut_ptr(), vertex_buffer_length) };
}
vertex_buf.unmap();
});
let index_buf = device.create_buffer(&wgpu::BufferDescriptor {
@@ -138,6 +140,8 @@ impl framework::Example for Example {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
unsafe { std::ptr::copy_nonoverlapping(index_data.as_ptr() as *const u8, data.as_mut_ptr(), index_buffer_length) };
}
index_buf.unmap();
});
// Create pipeline layout
@@ -189,6 +193,8 @@ impl framework::Example for Example {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
unsafe { std::ptr::copy_nonoverlapping(texels.as_ptr() as *const u8, data.as_mut_ptr(), texels.len()) };
}
temp_buf.unmap();
});
init_encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
@@ -235,6 +241,8 @@ impl framework::Example for Example {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
unsafe { std::ptr::copy_nonoverlapping(mx_ref.as_ptr() as *const u8, data.as_mut_ptr(), 64) };
}
uniform_buf.unmap();
});
// Create bind group
@@ -339,6 +347,8 @@ impl framework::Example for Example {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
unsafe { std::ptr::copy_nonoverlapping(mx_ref.as_ptr() as *const u8, data.as_mut_ptr(), 64) };
}
self.uniform_buf.unmap();
});
}


@@ -213,13 +213,16 @@ impl DestroyedResources<back::Backend> {
let buffer_guard = HUB.buffers.read();
for i in (0..self.mapped.len()).rev() {
// one in resource itself, one here in this list, one the owner holds, and one more somewhere?
let num_refs = self.mapped[i].ref_count.load();
trace!("{} references remain", num_refs);
if num_refs <= 4 {
// assert_eq!(num_refs, 4);
let resource_id = self.mapped.swap_remove(i).value;
let buf = &buffer_guard[resource_id];
let usage = match buf.pending_map_operation {
Some(BufferMapOperation::Read(..)) => resource::BufferUsageFlags::MAP_READ,
Some(BufferMapOperation::Write(..)) => resource::BufferUsageFlags::MAP_WRITE,
_ => unreachable!(),
};
trackers.buffers.get_with_replaced_usage(&buffer_guard, resource_id, usage).unwrap();
let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
@@ -228,7 +231,6 @@ impl DestroyedResources<back::Backend> {
.push(resource_id);
}
}
}
fn triage_framebuffers(
&mut self,
@@ -269,34 +271,39 @@ impl DestroyedResources<back::Backend> {
}
fn handle_mapping(&mut self, raw: &<back::Backend as hal::Backend>::Device) {
let mut buffer_guard = HUB.buffers.write();
for buffer_id in self.ready_to_map.drain(..) {
let buffer = &mut buffer_guard[buffer_id];
let mut operation = None;
let (result, ptr) = {
let mut buffer_guard = HUB.buffers.write();
let buffer = &mut buffer_guard[buffer_id];
std::mem::swap(&mut operation, &mut buffer.pending_map_operation);
match operation {
Some(BufferMapOperation::Read(range, callback, userdata)) => {
match operation.clone().unwrap() {
BufferMapOperation::Read(range, ..) => {
if let Ok(ptr) = unsafe { raw.map_memory(&buffer.memory, range.clone()) } {
if !buffer.memory_properties.contains(hal::memory::Properties::COHERENT) {
unsafe { raw.invalidate_mapped_memory_ranges(iter::once((&buffer.memory, range.clone()))).unwrap() }; // TODO
}
callback(BufferMapAsyncStatus::Success, ptr, userdata);
(BufferMapAsyncStatus::Success, Some(ptr))
} else {
callback(BufferMapAsyncStatus::Error, std::ptr::null(), userdata);
(BufferMapAsyncStatus::Error, None)
}
},
Some(BufferMapOperation::Write(range, callback, userdata)) => {
BufferMapOperation::Write(range, ..) => {
if let Ok(ptr) = unsafe { raw.map_memory(&buffer.memory, range.clone()) } {
if !buffer.memory_properties.contains(hal::memory::Properties::COHERENT) {
buffer.mapped_write_ranges.push(range);
buffer.mapped_write_ranges.push(range.clone());
}
callback(BufferMapAsyncStatus::Success, ptr, userdata);
(BufferMapAsyncStatus::Success, Some(ptr))
} else {
callback(BufferMapAsyncStatus::Error, std::ptr::null_mut(), userdata);
(BufferMapAsyncStatus::Error, None)
}
},
_ => unreachable!(),
}
};
match operation.unwrap() {
BufferMapOperation::Read(_, callback, userdata) => callback(result, ptr.unwrap_or(std::ptr::null_mut()), userdata),
BufferMapOperation::Write(_, callback, userdata) => callback(result, ptr.unwrap_or(std::ptr::null_mut()), userdata),
};
}
}
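The handle_mapping hunk above resolves the mapping and takes the pending operation while the buffer write lock is held, but fires the user callback only after that scope (and its guard) has ended. A rough standalone sketch of that shape, with hypothetical types standing in for the wgpu internals:

use std::sync::RwLock;

// Hypothetical buffer with one pending map callback; not the wgpu types.
struct Buffer {
    data: Vec<u8>,
    pending_map_operation: Option<Box<dyn FnOnce(Result<*mut u8, ()>)>>,
}

fn handle_mapping(buffer_lock: &RwLock<Buffer>) {
    // Resolve the mapping under the write guard, but defer the callback.
    let (operation, result) = {
        let mut buffer = buffer_lock.write().unwrap();
        let operation = buffer.pending_map_operation.take();
        let result: Result<*mut u8, ()> = Ok(buffer.data.as_mut_ptr());
        (operation, result)
    }; // write guard dropped here

    // Invoke the callback with the lock released, so it is free to call back
    // into the buffer (for example, to unmap it) without deadlocking.
    if let Some(callback) = operation {
        callback(result);
    }
}

fn main() {
    let buffer_lock = RwLock::new(Buffer {
        data: vec![0u8; 64],
        pending_map_operation: Some(Box::new(|result| {
            if let Ok(ptr) = result {
                println!("mapped at {:p}", ptr);
            }
        })),
    });
    handle_mapping(&buffer_lock);
}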
@@ -1022,12 +1029,14 @@ pub extern "C" fn wgpu_queue_submit(
command_buffer_ptr: *const CommandBufferId,
command_buffer_count: usize,
) {
let command_buffer_ids =
unsafe { slice::from_raw_parts(command_buffer_ptr, command_buffer_count) };
let (old_submit_index, fence) = {
let mut device_guard = HUB.devices.write();
let device = &mut device_guard[queue_id];
let mut swap_chain_links = Vec::new();
let command_buffer_ids =
unsafe { slice::from_raw_parts(command_buffer_ptr, command_buffer_count) };
let old_submit_index = device
.life_guard
@@ -1125,6 +1134,15 @@ pub extern "C" fn wgpu_queue_submit(
}
}
(old_submit_index, fence)
};
// No need for write access to the device from here on out
let device_guard = HUB.devices.read();
let device = &device_guard[queue_id];
let mut trackers = device.trackers.lock();
let last_done = {
let mut destroyed = device.destroyed.lock();
destroyed.triage_referenced(&mut *trackers);


@@ -40,6 +40,7 @@ pub enum BufferMapAsyncStatus {
ContextLost,
}
#[derive(Clone)]
pub(crate) enum BufferMapOperation {
Read(std::ops::Range<u64>, BufferMapReadCallback, *mut u8),
Write(std::ops::Range<u64>, BufferMapWriteCallback, *mut u8),