mirror of https://github.com/gfx-rs/wgpu.git, synced 2024-11-25 08:13:27 +00:00

[rs] prototype of async/await for buffer mapping

commit a85f95dfdd, parent 1968eb81e7
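The diff below replaces the callback-based `Buffer::map_read_async` with a `Buffer::map_read` that returns a `GpuFuture`, and rewrites the examples as `async fn run()` driven by `futures::executor::block_on`. As a rough sketch of that calling-shape change only, using hypothetical stand-in types (`FakeBuffer` and its methods are not the real wgpu API; the actual signatures are in the diff that follows):

// Illustrative stand-ins, not wgpu code.
struct FakeBuffer;

impl FakeBuffer {
    // Old shape: the mapped bytes are handed to a callback.
    fn map_read_async<F: FnOnce(Result<Vec<u8>, ()>)>(&self, callback: F) {
        callback(Ok(vec![0u8; 4]));
    }

    // New shape: the caller gets a future and awaits it.
    async fn map_read(&self) -> Result<Vec<u8>, ()> {
        Ok(vec![0u8; 4])
    }
}

async fn run() {
    let buffer = FakeBuffer;

    // Before this commit: result delivered through a callback.
    buffer.map_read_async(|result| {
        if let Ok(bytes) = result {
            println!("callback saw {} bytes", bytes.len());
        }
    });

    // After this commit: result delivered through a future.
    if let Ok(bytes) = buffer.map_read().await {
        println!("await saw {} bytes", bytes.len());
    }
}

fn main() {
    // The commit adds futures = "0.3" as a dependency to drive the async fn.
    futures::executor::block_on(run());
}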
.gitignore (vendored), 3 changes

@@ -12,5 +12,8 @@ Cargo.lock
 # Other
 .DS_Store
 
+# VSCode project
+.vscode
+
 # Output from capture example
 red.png
@@ -46,3 +46,4 @@ log = "0.4"
 png = "0.15"
 winit = "0.20.0-alpha4"
 zerocopy = "0.2"
+futures = "0.3"
@@ -4,7 +4,7 @@
 use std::fs::File;
 use std::mem::size_of;
 
-fn main() {
+async fn run() {
     env_logger::init();
 
     let adapter = wgpu::Adapter::request(
@@ -86,21 +86,21 @@ fn main() {
     queue.submit(&[command_buffer]);
 
     // Write the buffer as a PNG
-    output_buffer.map_read_async(
-        0,
-        (size * size) as usize * size_of::<u32>(),
-        move |result: wgpu::BufferMapAsyncResult<&[u8]>| {
-            let mut png_encoder = png::Encoder::new(File::create("red.png").unwrap(), size, size);
-            png_encoder.set_depth(png::BitDepth::Eight);
-            png_encoder.set_color(png::ColorType::RGBA);
-            png_encoder
-                .write_header()
-                .unwrap()
-                .write_image_data(result.unwrap().data)
-                .unwrap();
-        },
-    );
+    if let Ok(mapping) = output_buffer.map_read(0u64, (size * size) as u64 * size_of::<u32>() as u64).await {
+        let mut png_encoder = png::Encoder::new(File::create("red.png").unwrap(), size, size);
+        png_encoder.set_depth(png::BitDepth::Eight);
+        png_encoder.set_color(png::ColorType::RGBA);
+        png_encoder
+            .write_header()
+            .unwrap()
+            .write_image_data(mapping.as_slice())
+            .unwrap();
+    }
 
     // The device will be polled when it is dropped but we can also poll it explicitly
     device.poll(true);
 }
+
+fn main() {
+    futures::executor::block_on(run());
+}
@@ -1,7 +1,7 @@
 use std::{convert::TryInto as _, str::FromStr};
 use zerocopy::AsBytes as _;
 
-fn main() {
+async fn run() {
     env_logger::init();
 
     // For now this just panics if you didn't pass numbers. Could add proper error handling.
@@ -93,15 +93,17 @@ fn main() {
 
     queue.submit(&[encoder.finish()]);
 
-    // FIXME: Align and use `LayoutVerified`
-    staging_buffer.map_read_async(0, slice_size, |result| {
-        if let Ok(mapping) = result {
-            let times: Box<[u32]> = mapping
-                .data
-                .chunks_exact(4)
-                .map(|b| u32::from_ne_bytes(b.try_into().unwrap()))
-                .collect();
-            println!("Times: {:?}", times);
-        }
-    });
+    if let Ok(mapping) = staging_buffer.map_read(0u64, size).await {
+        let times : Box<[u32]> = mapping
+            .as_slice()
+            .chunks_exact(4)
+            .map(|b| u32::from_ne_bytes(b.try_into().unwrap()))
+            .collect();
+
+        println!("Times: {:?}", times);
+    }
+}
+
+fn main() {
+    futures::executor::block_on(run());
 }
wgpu/src/future.rs (new file), 72 changes

@@ -0,0 +1,72 @@
+use std::future::Future;
+use std::pin::Pin;
+use std::sync::{Arc, Mutex};
+use std::task::{Context, Poll, Waker};
+
+struct GpuFutureInner<T> {
+    id: wgc::id::DeviceId,
+    result: Option<T>,
+    waker: Option<Waker>,
+}
+
+/// A Future that can poll the wgpu::Device
+pub struct GpuFuture<T> {
+    inner: Arc<Mutex<GpuFutureInner<T>>>,
+}
+
+/// A completion handle to set the result on a GpuFuture
+pub struct GpuFutureCompletion<T> {
+    inner: Arc<Mutex<GpuFutureInner<T>>>,
+}
+
+impl<T> Future for GpuFuture<T>
+{
+    type Output = T;
+
+    fn poll(self: Pin<&mut Self>, context: &mut Context) -> Poll<Self::Output> {
+        // grab a clone of the Arc
+        let arc = Arc::clone(&Pin::into_inner(self).inner);
+
+        // grab the device id and set the waker, but release the lock, so that the native callback can write to it
+        let device_id = {
+            let mut inner = arc.lock().unwrap();
+            inner.waker.replace(context.waker().clone());
+            inner.id
+        };
+
+        // polling the device should trigger the callback
+        wgn::wgpu_device_poll(device_id, true);
+
+        // now take the lock again, and check whether the future is complete
+        let mut inner = arc.lock().unwrap();
+        match inner.result.take() {
+            Some(value) => Poll::Ready(value),
+            _ => Poll::Pending,
+        }
+    }
+}
+
+impl<T> GpuFutureCompletion<T> {
+    pub fn complete(self, value: T) {
+        let mut inner = self.inner.lock().unwrap();
+        inner.result.replace(value);
+        if let Some(waker) = &inner.waker {
+            waker.wake_by_ref();
+        }
+    }
+}
+
+pub(crate) fn new_gpu_future<T>(id: wgc::id::DeviceId) -> (GpuFuture<T>, GpuFutureCompletion<T>) {
+    let inner = Arc::new(Mutex::new(GpuFutureInner {
+        id,
+        result: None,
+        waker: None,
+    }));
+
+    (
+        GpuFuture {
+            inner: inner.clone(),
+        },
+        GpuFutureCompletion { inner },
+    )
+}
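The new future.rs couples a hand-rolled Future to wgpu's callback machinery: shared state behind an Arc<Mutex<..>>, a completion handle that stores the result and wakes the task, and a poll implementation that also pokes the device. Below is a standalone sketch of the same pattern with the device polling left out; the names (DemoFuture, DemoCompletion, new_pair) are illustrative only and a plain thread stands in for the native callback. It runs with the futures crate this commit adds as a dependency.

use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};

struct Inner<T> {
    result: Option<T>,
    waker: Option<Waker>,
}

struct DemoFuture<T> {
    inner: Arc<Mutex<Inner<T>>>,
}

struct DemoCompletion<T> {
    inner: Arc<Mutex<Inner<T>>>,
}

impl<T> Future for DemoFuture<T> {
    type Output = T;

    fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<T> {
        let mut inner = self.inner.lock().unwrap();
        // Register the current task's waker so `complete` can wake us later,
        // then check whether the result has already arrived.
        inner.waker.replace(cx.waker().clone());
        match inner.result.take() {
            Some(value) => Poll::Ready(value),
            None => Poll::Pending,
        }
    }
}

impl<T> DemoCompletion<T> {
    fn complete(self, value: T) {
        let mut inner = self.inner.lock().unwrap();
        inner.result.replace(value);
        if let Some(waker) = inner.waker.take() {
            waker.wake();
        }
    }
}

fn new_pair<T>() -> (DemoFuture<T>, DemoCompletion<T>) {
    let inner = Arc::new(Mutex::new(Inner { result: None, waker: None }));
    (
        DemoFuture { inner: inner.clone() },
        DemoCompletion { inner },
    )
}

fn main() {
    let (future, completion) = new_pair::<u32>();
    // In wgpu the completion is fired from the native map-async callback;
    // here a background thread stands in for it.
    std::thread::spawn(move || completion.complete(42));
    let value = futures::executor::block_on(future);
    assert_eq!(value, 42);
}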
wgpu/src/lib.rs, 147 changes

@@ -1,5 +1,9 @@
 //! A cross-platform graphics and compute library based on WebGPU.
 
+mod future;
+use future::GpuFutureCompletion;
+pub use future::GpuFuture;
+
 use arrayvec::ArrayVec;
 
 use std::ffi::CString;
@@ -100,6 +104,7 @@ pub struct Device {
 #[derive(Debug)]
 pub struct Buffer {
     id: wgc::id::BufferId,
+    device_id: wgc::id::DeviceId,
 }
 
 /// A handle to a texture on the GPU.
@@ -494,13 +499,14 @@ impl<'a> TextureCopyView<'a> {
 pub struct CreateBufferMapped<'a> {
     id: wgc::id::BufferId,
     pub data: &'a mut [u8],
+    device_id: wgc::id::DeviceId,
 }
 
 impl CreateBufferMapped<'_> {
     /// Unmaps the buffer from host memory and returns a [`Buffer`].
     pub fn finish(self) -> Buffer {
         wgn::wgpu_buffer_unmap(self.id);
-        Buffer { id: self.id }
+        Buffer { device_id: self.device_id, id: self.id }
     }
 }
 
@@ -790,6 +796,7 @@ impl Device {
     /// Creates a new buffer.
     pub fn create_buffer(&self, desc: &BufferDescriptor) -> Buffer {
         Buffer {
+            device_id: self.id,
             id: wgn::wgpu_device_create_buffer(self.id, desc),
         }
     }
@@ -811,7 +818,7 @@ impl Device {
 
         let data = unsafe { std::slice::from_raw_parts_mut(ptr as *mut u8, size) };
 
-        CreateBufferMapped { id, data }
+        CreateBufferMapped { device_id: self.id, id, data }
     }
 
     /// Creates a new buffer, maps it into host-visible memory, copies data from the given slice,
@@ -858,6 +865,52 @@ impl Drop for Device {
     }
 }
 
+pub struct BufferReadMapping {
+    data: *const u8,
+    size: usize,
+    buffer_id: wgc::id::BufferId,
+}
+//TODO: proper error type
+pub type BufferMapReadResult = Result<BufferReadMapping, ()>;
+
+impl BufferReadMapping
+{
+    pub fn as_slice(&self) -> &[u8] {
+        unsafe {
+            slice::from_raw_parts(self.data as *const u8, self.size)
+        }
+    }
+}
+
+impl Drop for BufferReadMapping {
+    fn drop(&mut self) {
+        wgn::wgpu_buffer_unmap(self.buffer_id);
+    }
+}
+
+pub struct BufferWriteMapping {
+    data: *mut u8,
+    size: usize,
+    buffer_id: wgc::id::BufferId,
+}
+//TODO: proper error type
+pub type BufferMapWriteResult = Result<BufferWriteMapping, ()>;
+
+impl BufferWriteMapping
+{
+    pub fn as_slice(&mut self) -> &mut [u8] {
+        unsafe {
+            slice::from_raw_parts_mut(self.data as *mut u8, self.size)
+        }
+    }
+}
+
+impl Drop for BufferWriteMapping {
+    fn drop(&mut self) {
+        wgn::wgpu_buffer_unmap(self.buffer_id);
+    }
+}
+
 pub struct BufferAsyncMapping<T> {
     pub data: T,
     buffer_id: wgc::id::BufferId,
@@ -871,97 +924,99 @@ impl<T> Drop for BufferAsyncMapping<T> {
     }
 }
 
-struct BufferMapReadAsyncUserData<F>
-where
-    F: FnOnce(BufferMapAsyncResult<&[u8]>),
+struct BufferMapReadFutureUserData
 {
-    size: usize,
-    callback: F,
+    size: BufferAddress,
+    completion: GpuFutureCompletion<BufferMapReadResult>,
     buffer_id: wgc::id::BufferId,
 }
 
-struct BufferMapWriteAsyncUserData<F>
-where
-    F: FnOnce(BufferMapAsyncResult<&mut [u8]>),
+struct BufferMapWriteFutureUserData
 {
-    size: usize,
-    callback: F,
+    size: BufferAddress,
+    completion: GpuFutureCompletion<BufferMapWriteResult>,
     buffer_id: wgc::id::BufferId,
 }
 
 impl Buffer {
-    pub fn map_read_async<F>(&self, start: BufferAddress, size: usize, callback: F)
-    where
-        F: FnOnce(BufferMapAsyncResult<&[u8]>),
+    /// Map the buffer for reading. The result is returned in a future.
+    pub fn map_read(&self, start: BufferAddress, size: BufferAddress) -> GpuFuture<BufferMapReadResult>
     {
-        extern "C" fn buffer_map_read_callback_wrapper<F>(
-            status: BufferMapAsyncStatus,
+        let (future, completion) = future::new_gpu_future(self.device_id);
+
+        extern "C" fn buffer_map_read_future_wrapper(
+            status: wgc::resource::BufferMapAsyncStatus,
             data: *const u8,
             user_data: *mut u8,
-        ) where
-            F: FnOnce(BufferMapAsyncResult<&[u8]>),
+        )
         {
             let user_data =
-                unsafe { Box::from_raw(user_data as *mut BufferMapReadAsyncUserData<F>) };
-            let data: &[u8] = unsafe { slice::from_raw_parts(data as *const u8, user_data.size) };
-            match status {
-                BufferMapAsyncStatus::Success => (user_data.callback)(Ok(BufferAsyncMapping {
+                unsafe { Box::from_raw(user_data as *mut BufferMapReadFutureUserData) };
+            if let wgc::resource::BufferMapAsyncStatus::Success = status {
+                user_data.completion.complete(Ok(BufferReadMapping {
                     data,
+                    size: user_data.size as usize,
                     buffer_id: user_data.buffer_id,
-                })),
-                _ => (user_data.callback)(Err(())),
+                }));
+            } else {
+                user_data.completion.complete(Err(()));
             }
         }
 
-        let user_data = Box::new(BufferMapReadAsyncUserData {
+        let user_data = Box::new(BufferMapReadFutureUserData {
             size,
-            callback,
+            completion,
             buffer_id: self.id,
         });
         wgn::wgpu_buffer_map_read_async(
             self.id,
             start,
-            size as BufferAddress,
-            buffer_map_read_callback_wrapper::<F>,
+            size,
+            buffer_map_read_future_wrapper,
             Box::into_raw(user_data) as *mut u8,
         );
+
+        future
     }
 
-    pub fn map_write_async<F>(&self, start: BufferAddress, size: usize, callback: F)
-    where
-        F: FnOnce(BufferMapAsyncResult<&mut [u8]>),
+    /// Map the buffer for writing. The result is returned in a future.
+    pub fn map_write(&self, start: BufferAddress, size: BufferAddress) -> GpuFuture<BufferMapWriteResult>
     {
-        extern "C" fn buffer_map_write_callback_wrapper<F>(
-            status: BufferMapAsyncStatus,
+        let (future, completion) = future::new_gpu_future(self.device_id);
+
+        extern "C" fn buffer_map_write_future_wrapper(
+            status: wgc::resource::BufferMapAsyncStatus,
             data: *mut u8,
             user_data: *mut u8,
-        ) where
-            F: FnOnce(BufferMapAsyncResult<&mut [u8]>),
+        )
         {
             let user_data =
-                unsafe { Box::from_raw(user_data as *mut BufferMapWriteAsyncUserData<F>) };
-            let data = unsafe { slice::from_raw_parts_mut(data as *mut u8, user_data.size) };
-            match status {
-                BufferMapAsyncStatus::Success => (user_data.callback)(Ok(BufferAsyncMapping {
+                unsafe { Box::from_raw(user_data as *mut BufferMapWriteFutureUserData) };
+            if let wgc::resource::BufferMapAsyncStatus::Success = status {
+                user_data.completion.complete(Ok(BufferWriteMapping {
                     data,
+                    size: user_data.size as usize,
                     buffer_id: user_data.buffer_id,
-                })),
-                _ => (user_data.callback)(Err(())),
+                }));
+            } else {
+                user_data.completion.complete(Err(()));
             }
         }
 
-        let user_data = Box::new(BufferMapWriteAsyncUserData {
+        let user_data = Box::new(BufferMapWriteFutureUserData {
             size,
-            callback,
+            completion,
             buffer_id: self.id,
         });
         wgn::wgpu_buffer_map_write_async(
             self.id,
             start,
-            size as BufferAddress,
-            buffer_map_write_callback_wrapper::<F>,
+            size,
+            buffer_map_write_future_wrapper,
             Box::into_raw(user_data) as *mut u8,
         );
+
+        future
     }
 
     /// Flushes any pending write operations and unmaps the buffer from host memory.
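The lib.rs hunk above bridges wgpu-native's C callback into the future: the user data (size, completion handle, buffer id) is boxed, smuggled through the FFI as a raw pointer via Box::into_raw, and reconstituted with Box::from_raw inside the extern "C" wrapper, which then fulfils the completion. A standalone sketch of that ownership round trip, with a channel sender standing in for the GpuFutureCompletion and the callback invoked directly instead of by the native API; all names here are illustrative:

use std::sync::mpsc;

struct UserData {
    size: usize,
    sender: mpsc::Sender<Result<usize, ()>>,
}

extern "C" fn map_read_callback(success: bool, user_data: *mut u8) {
    // Recover ownership of the boxed user data that was passed through the C ABI.
    let user_data = unsafe { Box::from_raw(user_data as *mut UserData) };
    let result = if success { Ok(user_data.size) } else { Err(()) };
    let _ = user_data.sender.send(result);
}

fn main() {
    let (sender, receiver) = mpsc::channel();
    let user_data = Box::new(UserData { size: 16, sender });
    // lib.rs hands the wrapper and the raw pointer to wgpu-native; here we call
    // the callback directly to show the Box::into_raw / Box::from_raw round trip.
    map_read_callback(true, Box::into_raw(user_data) as *mut u8);
    assert_eq!(receiver.recv().unwrap(), Ok(16));
}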