84: Create buffer mapped r=kvark a=swiftcoder

I haven't really taken a swing at error handling here, posting it up early for feedback on the API.

85: Unpin nightly date for bindings generation r=kvark a=grovesNL

rust-lang/rust#57915 allows us to run bindings generation with nightly again, so we shouldn't have to pin to an older nightly date anymore (assuming everyone has a recent nightly).

Co-authored-by: Tristam MacDonald <tristam@trist.am>
Co-authored-by: Joshua Groves <josh@joshgroves.com>
This commit is contained in:
bors[bot] 2019-03-01 18:42:35 +00:00
commit 152fad43c3
4 changed files with 76 additions and 48 deletions

View File

@@ -54,7 +54,7 @@ lib-rust: Cargo.lock wgpu-rs/Cargo.toml $(wildcard wgpu-rs/**/*.rs)
cargo build --manifest-path wgpu-rs/Cargo.toml --features $(FEATURE_RUST)
wgpu-bindings/*.h: Cargo.lock wgpu-bindings/src/*.rs lib-native
cargo +nightly-2018-12-27 run --manifest-path wgpu-bindings/Cargo.toml
cargo +nightly run --manifest-path wgpu-bindings/Cargo.toml
examples-native: lib-native wgpu-bindings/wgpu.h $(wildcard wgpu-native/**/*.c)
#$(MAKE) -C examples

View File

@@ -117,32 +117,25 @@ impl framework::Example for Example {
let (vertex_data, index_data) = create_vertices();
let vertex_buffer_length = vertex_data.len() * vertex_size;
let index_buffer_length = index_data.len() * mem::size_of::<u16>();
let vertex_buf = device.create_buffer(&wgpu::BufferDescriptor {
size: vertex_buffer_length as u32,
usage: wgpu::BufferUsageFlags::VERTEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
//vertex_buf.set_sub_data(0, framework::cast_slice(&vertex_data));
vertex_buf.map_write_async(0, vertex_buffer_length as u32, |result: wgpu::BufferMapAsyncResult<&mut [Vertex]>| {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
data.copy_from_slice(&vertex_data);
}
let vertex_buf = {
let (vertex_buf, vertex_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
size: vertex_buffer_length as u32,
usage: wgpu::BufferUsageFlags::VERTEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
vertex_buf_data.copy_from_slice(&vertex_data);
vertex_buf.unmap();
});
let index_buf = device.create_buffer(&wgpu::BufferDescriptor {
size: index_buffer_length as u32,
usage: wgpu::BufferUsageFlags::INDEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
// index_buf.set_sub_data(0, framework::cast_slice(&index_data));
index_buf.map_write_async(0, index_buffer_length as u32, |result: wgpu::BufferMapAsyncResult<&mut [u16]>| {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
data.copy_from_slice(&index_data);
}
vertex_buf
};
let index_buf = {
let (index_buf, index_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
size: index_buffer_length as u32,
usage: wgpu::BufferUsageFlags::INDEX | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
index_buf_data.copy_from_slice(&index_data);
index_buf.unmap();
});
index_buf
};
// Create pipeline layout
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
@@ -184,18 +177,15 @@ impl framework::Example for Example {
usage: wgpu::TextureUsageFlags::SAMPLED | wgpu::TextureUsageFlags::TRANSFER_DST,
});
let texture_view = texture.create_default_view();
let temp_buf = device.create_buffer(&wgpu::BufferDescriptor {
size: texels.len() as u32,
usage: wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
// temp_buf.set_sub_data(0, &texels);
temp_buf.map_write_async(0, texels.len() as u32, |result: wgpu::BufferMapAsyncResult<&mut [u8]>| {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
data.copy_from_slice(&texels);
}
let temp_buf = {
let (temp_buf, temp_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
size: texels.len() as u32,
usage: wgpu::BufferUsageFlags::TRANSFER_SRC | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
temp_buf_data.copy_from_slice(&texels);
temp_buf.unmap();
});
temp_buf
};
init_encoder.copy_buffer_to_texture(
wgpu::BufferCopyView {
buffer: &temp_buf,
@@ -230,20 +220,17 @@ impl framework::Example for Example {
compare_function: wgpu::CompareFunction::Always,
border_color: wgpu::BorderColor::TransparentBlack,
});
let uniform_buf = device.create_buffer(&wgpu::BufferDescriptor {
size: 64,
usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
let mx_total = Self::generate_matrix(sc_desc.width as f32 / sc_desc.height as f32);
let mx_ref: &[f32; 16] = mx_total.as_ref();
// uniform_buf.set_sub_data(0, framework::cast_slice(&mx_ref[..]));
uniform_buf.map_write_async(0, 64, |result: wgpu::BufferMapAsyncResult<&mut [f32]>| {
if let wgpu::BufferMapAsyncResult::Success(data) = result {
data.copy_from_slice(mx_ref);
}
let uniform_buf = {
let (uniform_buf, uniform_buf_data) = device.create_buffer_mapped(&wgpu::BufferDescriptor {
size: 64,
usage: wgpu::BufferUsageFlags::UNIFORM | wgpu::BufferUsageFlags::TRANSFER_DST | wgpu::BufferUsageFlags::MAP_WRITE,
});
uniform_buf_data.copy_from_slice(mx_ref);
uniform_buf.unmap();
});
uniform_buf
};
// Create bind group
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {

View File

@@ -469,13 +469,14 @@ pub fn device_track_buffer(
device_id: DeviceId,
buffer_id: BufferId,
ref_count: RefCount,
flags: resource::BufferUsageFlags,
) {
let query = HUB.devices
.read()
[device_id].trackers
.lock()
.buffers
.query(buffer_id, &ref_count, resource::BufferUsageFlags::empty());
.query(buffer_id, &ref_count, flags);
assert!(query.initialized);
}
@@ -488,7 +489,30 @@ pub extern "C" fn wgpu_device_create_buffer(
let buffer = device_create_buffer(device_id, desc);
let ref_count = buffer.life_guard.ref_count.clone();
let id = HUB.buffers.register_local(buffer);
device_track_buffer(device_id, id, ref_count);
device_track_buffer(device_id, id, ref_count, resource::BufferUsageFlags::empty());
id
}
/// C-callable entry point: creates a buffer and immediately maps its backing
/// memory for writing, returning the mapped host pointer via `mapped_ptr_out`.
/// The returned buffer starts life mapped; the caller is expected to write
/// through the pointer and later unmap it.
#[cfg(feature = "local")]
#[no_mangle]
pub extern "C" fn wgpu_device_create_buffer_mapped(
device_id: DeviceId,
desc: &resource::BufferDescriptor,
mapped_ptr_out: *mut *mut u8
) -> BufferId {
let buffer = device_create_buffer(device_id, desc);
let device_guard = HUB.devices.read();
let device = &device_guard[device_id];
// Map the buffer's entire range (0..size).
// NOTE(review): if map_memory fails, `*mapped_ptr_out` is never written, so
// the caller must pre-initialize it (e.g. to null) and check before use —
// the PR explicitly defers error handling; confirm callers do this.
if let Ok(ptr) = unsafe { device.raw.map_memory(&buffer.memory, 0..(desc.size as u64)) } {
unsafe{ *mapped_ptr_out = ptr; }
}
let ref_count = buffer.life_guard.ref_count.clone();
let id = HUB.buffers.register_local(buffer);
// Track with MAP_WRITE rather than empty flags because the buffer is
// handed to the caller already mapped for writing.
device_track_buffer(device_id, id, ref_count, resource::BufferUsageFlags::MAP_WRITE);
id
}

View File

@@ -432,6 +432,23 @@ impl Device {
}
}
/// Creates a buffer whose memory is mapped at creation time, returning the
/// buffer together with a mutable typed slice over the mapped memory so the
/// caller can fill it directly (then call `unmap()` on the buffer).
///
/// `desc.size` is in bytes and must be an exact multiple of `size_of::<T>()`;
/// the returned slice has `desc.size / size_of::<T>()` elements.
///
/// # Panics
/// Panics if `T` is zero-sized or `desc.size` is not a multiple of the
/// element size.
pub fn create_buffer_mapped<T>(&self, desc: &BufferDescriptor) -> (Buffer, &mut [T])
where T: 'static + Copy {
let type_size = std::mem::size_of::<T>() as u32;
assert_ne!(type_size, 0);
assert_eq!(desc.size % type_size, 0);
// `ptr` is filled in by the native call via the out-parameter.
// NOTE(review): if the native mapping fails, `ptr` stays null and the
// `from_raw_parts_mut` below is undefined behavior (null/unaligned data
// pointer) — the PR author flagged error handling as unfinished; confirm.
let mut ptr : *mut u8 = std::ptr::null_mut();
let buffer = Buffer {
id: wgn::wgpu_device_create_buffer_mapped(self.id, desc, &mut ptr as *mut *mut u8),
};
// SAFETY (as written): assumes `ptr` points to at least `desc.size` bytes
// of mapped, writable memory suitably aligned for `T`.
let data = unsafe { std::slice::from_raw_parts_mut(ptr as *mut T, desc.size as usize / std::mem::size_of::<T>()) };
(buffer, data)
}
pub fn create_texture(&self, desc: &TextureDescriptor) -> Texture {
Texture {
id: wgn::wgpu_device_create_texture(self.id, desc),