38: Draw the first triangle r=grovesNL a=kvark

- [x] vertex/index buffers
- [x] draws
- [x] bind groups
  - [x] creation
  - [x] binding
  - [x] Rust side
- [x] triangle (see the usage sketch below)
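
For reference, here is the Rust-side flow this PR enables, condensed from the updated `hello_triangle` example in the diff below. This is a sketch, not verbatim code from the commit: the `store_op` and `clear_color` fields and the `wgpu::Color::GREEN` constant are assumptions about the era's API that the visible hunks do not show.

```rust
// Sketch: record a render pass that clears the target and draws one triangle.
// Mirrors the example code further down; submission is elided.
fn record_triangle(
    device: &wgpu::Device,
    view: &wgpu::TextureView,
    render_pipeline: &wgpu::RenderPipeline,
) -> wgpu::CommandBuffer {
    let mut cmd_buf = device.create_command_buffer(&wgpu::CommandBufferDescriptor {});
    {
        let mut rpass = cmd_buf.begin_render_pass(&wgpu::RenderPassDescriptor {
            color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
                attachment: view,
                load_op: wgpu::LoadOp::Clear,
                store_op: wgpu::StoreOp::Store,  // assumed field, not in the hunk
                clear_color: wgpu::Color::GREEN, // assumed constant, not in the hunk
            }],
            depth_stencil_attachment: None,
        });
        rpass.set_pipeline(render_pipeline);
        rpass.draw(0..3, 0..1); // three vertices, one instance
        rpass.end_pass();
    }
    cmd_buf
}
```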


Co-authored-by: Dzmitry Malyshau <dmalyshau@mozilla.com>
bors[bot] 2019-01-22 20:07:34 +00:00
commit 742fee7e41
15 changed files with 747 additions and 122 deletions

Cargo.lock (generated)

@@ -660,7 +660,7 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"autocfg 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -681,7 +681,7 @@ name = "rand_hc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -719,7 +719,7 @@ name = "rand_xorshift"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]


@@ -27,7 +27,7 @@ fn main() {
let depth_stencil_state =
device.create_depth_stencil_state(&wgpu::DepthStencilStateDescriptor::IGNORE);
let _render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
layout: &pipeline_layout,
stages: &[
wgpu::PipelineStageDescriptor {
@@ -44,7 +44,7 @@ fn main() {
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
attachments_state: wgpu::AttachmentsState {
color_attachments: &[wgpu::Attachment {
format: wgpu::TextureFormat::R8g8b8a8Unorm,
format: wgpu::TextureFormat::B8g8r8a8Unorm,
samples: 1,
}],
depth_stencil_attachment: None,
@@ -95,7 +95,7 @@ fn main() {
let (_, view) = swap_chain.get_next_texture();
let mut cmd_buf = device.create_command_buffer(&wgpu::CommandBufferDescriptor {});
{
let rpass = cmd_buf.begin_render_pass(&wgpu::RenderPassDescriptor {
let mut rpass = cmd_buf.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &view,
load_op: wgpu::LoadOp::Clear,
@@ -104,6 +104,8 @@ fn main() {
}],
depth_stencil_attachment: None,
});
rpass.set_pipeline(&render_pipeline);
rpass.draw(0..3, 0..1);
rpass.end_pass();
}
@@ -118,6 +120,7 @@ fn main() {
#[cfg(not(feature = "winit"))]
{
let _ = render_pipeline;
let texture = device.create_texture(&wgpu::TextureDescriptor {
size: wgpu::Extent3d {
width: 256,


@@ -1,4 +1,8 @@
use crate::{BindGroupLayoutId, BufferId, SamplerId, TextureViewId};
use crate::track::{BufferTracker, TextureTracker};
use crate::{
LifeGuard, WeaklyStored,
BindGroupLayoutId, BufferId, SamplerId, TextureViewId,
};
use bitflags::bitflags;
@@ -46,6 +50,7 @@ pub struct PipelineLayoutDescriptor {
pub(crate) struct PipelineLayout<B: hal::Backend> {
pub raw: B::PipelineLayout,
pub bind_group_layout_ids: Vec<WeaklyStored<BindGroupLayoutId>>,
}
#[repr(C)]
@@ -77,4 +82,8 @@ pub struct BindGroupDescriptor {
pub(crate) struct BindGroup<B: hal::Backend> {
pub raw: B::DescriptorSet,
pub layout_id: WeaklyStored<BindGroupLayoutId>,
pub life_guard: LifeGuard,
pub used_buffers: BufferTracker,
pub used_textures: TextureTracker,
}


@@ -103,15 +103,11 @@ impl<B: hal::Backend> CommandAllocator<B> {
pool.available.pop().unwrap()
}
pub fn submit(&self, cmd_buf: CommandBuffer<B>) {
pub fn after_submit(&self, cmd_buf: CommandBuffer<B>) {
self.inner.lock().pending.push(cmd_buf);
}
pub fn recycle(&self, cmd_buf: CommandBuffer<B>) {
self.inner.lock().recycle(cmd_buf);
}
pub fn maintain(&self, device: &B::Device, last_done: SubmissionIndex) {
pub fn maintain(&self, last_done: SubmissionIndex) {
let mut inner = self.inner.lock();
for i in (0..inner.pending.len()).rev() {
let index = inner.pending[i].life_guard.submission_index.load(Ordering::Acquire);
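
The hunk is clipped just inside the `maintain()` loop. As a rough, self-contained model of what it does (the `<=` comparison against `last_done` is an assumption; the loop body is not visible in the hunk):

```rust
// Toy maintain(): scan pending command buffers in reverse and recycle those
// whose submission index the GPU has already completed.
struct Pending {
    submission_index: usize,
}

fn maintain(pending: &mut Vec<Pending>, last_done: usize) -> usize {
    let mut recycled = 0;
    for i in (0..pending.len()).rev() {
        if pending[i].submission_index <= last_done {
            pending.swap_remove(i); // order does not matter for recycling
            recycled += 1;
        }
    }
    recycled
}
```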


@@ -0,0 +1,78 @@
use crate::{
B, BindGroup, Stored, WeaklyStored,
BindGroupId, BindGroupLayoutId, PipelineLayoutId,
};
pub struct BindGroupPair {
layout_id: WeaklyStored<BindGroupLayoutId>,
group_id: Stored<BindGroupId>,
}
#[derive(Default)]
pub struct BindGroupEntry {
expected_layout_id: Option<WeaklyStored<BindGroupLayoutId>>,
provided: Option<BindGroupPair>,
}
impl BindGroupEntry {
fn provide(&mut self, bind_group_id: BindGroupId, bind_group: &BindGroup<B>) -> bool {
if let Some(BindGroupPair { ref layout_id, ref group_id }) = self.provided {
if group_id.value == bind_group_id {
assert_eq!(*layout_id, bind_group.layout_id);
return false
}
}
self.provided = Some(BindGroupPair {
layout_id: bind_group.layout_id.clone(),
group_id: Stored {
value: bind_group_id,
ref_count: bind_group.life_guard.ref_count.clone(),
},
});
self.expected_layout_id == Some(bind_group.layout_id.clone())
}
pub fn expect_layout(
&mut self, bind_group_layout_id: BindGroupLayoutId,
) -> Option<BindGroupId> {
let some = Some(WeaklyStored(bind_group_layout_id));
if self.expected_layout_id != some {
self.expected_layout_id = some;
match self.provided {
Some(BindGroupPair { ref layout_id, ref group_id })
if layout_id.0 == bind_group_layout_id => Some(group_id.value),
Some(_) | None => None,
}
} else {
None
}
}
}
#[derive(Default)]
pub struct Binder {
pub(crate) pipeline_layout_id: Option<WeaklyStored<PipelineLayoutId>>, //TODO: strongly `Stored`
pub(crate) entries: Vec<BindGroupEntry>,
}
impl Binder {
pub fn ensure_length(&mut self, length: usize) {
while self.entries.len() < length {
self.entries.push(BindGroupEntry::default());
}
}
pub(crate) fn provide_entry(
&mut self, index: usize, bind_group_id: BindGroupId, bind_group: &BindGroup<B>
) -> Option<PipelineLayoutId> {
self.ensure_length(index + 1);
if self.entries[index].provide(bind_group_id, bind_group) {
self.pipeline_layout_id.as_ref().map(|&WeaklyStored(id)| id)
} else {
None
}
}
}
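
A self-contained toy of the bookkeeping above, with plain `u32` ids standing in for the crate's `Stored`/`WeaklyStored` handles — a sketch of the contract, not the actual types. `provide` reports whether a newly bound group already matches the layout the current pipeline expects (so the caller can bind it immediately), and `expect_layout` is called on pipeline change and yields a previously provided group that can be re-bound as-is:

```rust
#[derive(Default)]
struct Entry {
    expected_layout: Option<u32>,
    provided: Option<(u32 /* layout id */, u32 /* group id */)>,
}

impl Entry {
    fn provide(&mut self, group: u32, layout: u32) -> bool {
        // Re-binding the same group is a no-op.
        if let Some((_, g)) = self.provided {
            if g == group {
                return false;
            }
        }
        self.provided = Some((layout, group));
        self.expected_layout == Some(layout)
    }

    fn expect_layout(&mut self, layout: u32) -> Option<u32> {
        if self.expected_layout != Some(layout) {
            self.expected_layout = Some(layout);
            match self.provided {
                Some((l, g)) if l == layout => Some(g),
                _ => None,
            }
        } else {
            None
        }
    }
}

fn main() {
    let mut slot = Entry::default();
    assert!(!slot.provide(7, 1));               // no pipeline yet: defer the bind
    assert_eq!(slot.expect_layout(1), Some(7)); // pipeline set: re-bind group 7
    assert!(slot.provide(8, 1));                // matching layout: bind right away
    assert!(!slot.provide(8, 1));               // same group again: no-op
}
```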


@@ -1,6 +1,12 @@
use crate::command::bind::{Binder};
use crate::registry::{Items, HUB};
use crate::{BindGroupId, CommandBufferId, ComputePassId, ComputePipelineId, Stored};
use crate::track::{BufferTracker, TextureTracker};
use crate::{
Stored, CommandBuffer,
BindGroupId, CommandBufferId, ComputePassId, ComputePipelineId,
};
use hal;
use hal::command::RawCommandBuffer;
use std::iter;
@@ -9,11 +15,20 @@ use std::iter;
pub struct ComputePass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
binder: Binder,
buffer_tracker: BufferTracker,
texture_tracker: TextureTracker,
}
impl<B: hal::Backend> ComputePass<B> {
pub(crate) fn new(raw: B::CommandBuffer, cmb_id: Stored<CommandBufferId>) -> Self {
ComputePass { raw, cmb_id }
ComputePass {
raw,
cmb_id,
binder: Binder::default(),
buffer_tracker: BufferTracker::new(),
texture_tracker: TextureTracker::new(),
}
}
}
@@ -29,43 +44,6 @@ pub extern "C" fn wgpu_compute_pass_end_pass(pass_id: ComputePassId) -> CommandB
pass.cmb_id.value
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_bind_group(
pass_id: ComputePassId,
index: u32,
bind_group_id: BindGroupId,
) {
let bind_group_guard = HUB.bind_groups.read();
let set = &bind_group_guard.get(bind_group_id).raw;
let layout = unimplemented!();
// see https://github.com/gpuweb/gpuweb/pull/93
unsafe {
HUB.compute_passes
.write()
.get_mut(pass_id)
.raw
.bind_compute_descriptor_sets(layout, index as usize, iter::once(set), &[]);
}
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass_id: ComputePassId,
pipeline_id: ComputePipelineId,
) {
let pipeline_guard = HUB.compute_pipelines.read();
let pipeline = &pipeline_guard.get(pipeline_id).raw;
unsafe {
HUB.compute_passes
.write()
.get_mut(pass_id)
.raw
.bind_compute_pipeline(pipeline);
}
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: ComputePassId, x: u32, y: u32, z: u32) {
unsafe {
@@ -76,3 +54,80 @@ pub extern "C" fn wgpu_compute_pass_dispatch(pass_id: ComputePassId, x: u32, y:
.dispatch([x, y, z]);
}
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_bind_group(
pass_id: ComputePassId,
index: u32,
bind_group_id: BindGroupId,
) {
let mut pass_guard = HUB.compute_passes.write();
let pass = pass_guard.get_mut(pass_id);
let bind_group_guard = HUB.bind_groups.read();
let bind_group = bind_group_guard.get(bind_group_id);
CommandBuffer::insert_barriers(
&mut pass.raw,
pass.buffer_tracker.consume_by_replace(&bind_group.used_buffers),
pass.texture_tracker.consume_by_replace(&bind_group.used_textures),
&*HUB.buffers.read(),
&*HUB.textures.read(),
);
if let Some(pipeline_layout_id) = pass.binder.provide_entry(index as usize, bind_group_id, bind_group) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline_layout_id);
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index as usize,
iter::once(&bind_group.raw),
&[],
);
}
}
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass_id: ComputePassId,
pipeline_id: ComputePipelineId,
) {
let mut pass_guard = HUB.compute_passes.write();
let pass = pass_guard.get_mut(pass_id);
let pipeline_guard = HUB.compute_pipelines.read();
let pipeline = pipeline_guard.get(pipeline_id);
unsafe {
pass.raw.bind_compute_pipeline(&pipeline.raw);
}
if pass.binder.pipeline_layout_id == Some(pipeline.layout_id.clone()) {
return
}
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline.layout_id.0);
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder.ensure_length(pipeline_layout.bind_group_layout_ids.len());
for (index, (entry, bgl_id)) in pass.binder.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
if let Some(bg_id) = entry.expect_layout(bgl_id.0) {
let bind_group = bind_group_guard.get(bg_id);
unsafe {
pass.raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(&bind_group.raw),
&[]
);
}
}
}
}


@@ -1,4 +1,5 @@
mod allocator;
mod bind;
mod compute;
mod render;
@@ -25,7 +26,7 @@ use log::trace;
use std::collections::hash_map::Entry;
use std::ops::Range;
use std::slice;
use std::{iter, slice};
use std::thread::ThreadId;
@@ -335,6 +336,11 @@ pub extern "C" fn wgpu_command_buffer_begin_render_pass(
clear_values,
hal::command::SubpassContents::Inline,
);
current_comb.set_scissors(0, iter::once(&rect));
current_comb.set_viewports(0, iter::once(hal::pso::Viewport {
rect,
depth: 0.0 .. 1.0,
}));
}
HUB.render_passes.write().register(RenderPass::new(
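
`begin_render_pass` now installs a default scissor and a full-target viewport here; this pairs with the removal of the hardcoded 100x100 baked viewport/scissor from `wgpu_device_create_render_pipeline` later in the diff. A minimal sketch of the default state, assuming gfx-hal's `pso` types as they appear in the hunk:

```rust
use gfx_hal as hal;

// Full-target viewport with the standard 0..1 depth range; the scissor is the
// same rect. `rect` is the render-target rectangle computed at pass begin.
fn default_viewport(rect: hal::pso::Rect) -> hal::pso::Viewport {
    hal::pso::Viewport {
        rect,
        depth: 0.0..1.0,
    }
}
```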


@@ -1,13 +1,21 @@
use crate::command::bind::Binder;
use crate::resource::BufferUsageFlags;
use crate::registry::{Items, HUB};
use crate::track::{BufferTracker, TextureTracker};
use crate::{CommandBuffer, CommandBufferId, RenderPassId, Stored};
use crate::track::{BufferTracker, TextureTracker, TrackPermit};
use crate::{
CommandBuffer, Stored,
BindGroupId, BufferId, CommandBufferId, RenderPassId, RenderPipelineId,
};
use hal::command::RawCommandBuffer;
use std::iter;
pub struct RenderPass<B: hal::Backend> {
raw: B::CommandBuffer,
cmb_id: Stored<CommandBufferId>,
binder: Binder,
buffer_tracker: BufferTracker,
texture_tracker: TextureTracker,
}
@@ -17,6 +25,7 @@ impl<B: hal::Backend> RenderPass<B> {
RenderPass {
raw,
cmb_id,
binder: Binder::default(),
buffer_tracker: BufferTracker::new(),
texture_tracker: TextureTracker::new(),
}
@@ -36,8 +45,8 @@ pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBuf
if let Some(ref mut last) = cmb.raw.last_mut() {
CommandBuffer::insert_barriers(
last,
cmb.buffer_tracker.consume(&pass.buffer_tracker),
cmb.texture_tracker.consume(&pass.texture_tracker),
cmb.buffer_tracker.consume_by_replace(&pass.buffer_tracker),
cmb.texture_tracker.consume_by_replace(&pass.texture_tracker),
&*HUB.buffers.read(),
&*HUB.textures.read(),
);
@@ -47,3 +56,180 @@ pub extern "C" fn wgpu_render_pass_end_pass(pass_id: RenderPassId) -> CommandBuf
cmb.raw.push(pass.raw);
pass.cmb_id.value
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_index_buffer(
pass_id: RenderPassId, buffer_id: BufferId, offset: u32
) {
let mut pass_guard = HUB.render_passes.write();
let buffer_guard = HUB.buffers.read();
let pass = pass_guard.get_mut(pass_id);
let (buffer, _) = pass.buffer_tracker
.get_with_usage(
&*buffer_guard,
buffer_id,
BufferUsageFlags::INDEX,
TrackPermit::EXTEND,
)
.unwrap();
let view = hal::buffer::IndexBufferView {
buffer: &buffer.raw,
offset: offset as u64,
index_type: hal::IndexType::U16, //TODO?
};
unsafe {
pass.raw.bind_index_buffer(view);
}
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_vertex_buffers(
pass_id: RenderPassId, buffers: &[BufferId], offsets: &[u32]
) {
let mut pass_guard = HUB.render_passes.write();
let buffer_guard = HUB.buffers.read();
let pass = pass_guard.get_mut(pass_id);
for &id in buffers {
pass.buffer_tracker
.get_with_usage(
&*buffer_guard,
id,
BufferUsageFlags::VERTEX,
TrackPermit::EXTEND,
)
.unwrap();
}
assert_eq!(buffers.len(), offsets.len());
let buffers = buffers
.iter()
.map(|&id| &buffer_guard.get(id).raw)
.zip(offsets.iter().map(|&off| off as u64));
unsafe {
pass.raw.bind_vertex_buffers(0, buffers);
}
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw(
pass_id: RenderPassId,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
unsafe {
HUB.render_passes
.write()
.get_mut(pass_id)
.raw
.draw(
first_vertex .. first_vertex + vertex_count,
first_instance .. first_instance + instance_count,
);
}
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_draw_indexed(
pass_id: RenderPassId,
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
) {
unsafe {
HUB.render_passes
.write()
.get_mut(pass_id)
.raw
.draw_indexed(
first_index .. first_index + index_count,
base_vertex,
first_instance .. first_instance + instance_count,
);
}
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_bind_group(
pass_id: RenderPassId,
index: u32,
bind_group_id: BindGroupId,
) {
let mut pass_guard = HUB.render_passes.write();
let pass = pass_guard.get_mut(pass_id);
let bind_group_guard = HUB.bind_groups.read();
let bind_group = bind_group_guard.get(bind_group_id);
pass.buffer_tracker
.consume_by_extend(&bind_group.used_buffers)
.unwrap();
pass.texture_tracker
.consume_by_extend(&bind_group.used_textures)
.unwrap();
if let Some(pipeline_layout_id) = pass.binder.provide_entry(index as usize, bind_group_id, bind_group) {
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline_layout_id);
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index as usize,
iter::once(&bind_group.raw),
&[],
);
}
}
}
#[no_mangle]
pub extern "C" fn wgpu_render_pass_set_pipeline(
pass_id: RenderPassId,
pipeline_id: RenderPipelineId,
) {
let mut pass_guard = HUB.render_passes.write();
let pass = pass_guard.get_mut(pass_id);
let pipeline_guard = HUB.render_pipelines.read();
let pipeline = pipeline_guard.get(pipeline_id);
unsafe {
pass.raw.bind_graphics_pipeline(&pipeline.raw);
}
if pass.binder.pipeline_layout_id == Some(pipeline.layout_id.clone()) {
return
}
let pipeline_layout_guard = HUB.pipeline_layouts.read();
let pipeline_layout = pipeline_layout_guard.get(pipeline.layout_id.0);
let bind_group_guard = HUB.bind_groups.read();
pass.binder.pipeline_layout_id = Some(pipeline.layout_id.clone());
pass.binder.ensure_length(pipeline_layout.bind_group_layout_ids.len());
for (index, (entry, bgl_id)) in pass.binder.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
if let Some(bg_id) = entry.expect_layout(bgl_id.0) {
let bind_group = bind_group_guard.get(bg_id);
unsafe {
pass.raw.bind_graphics_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(&bind_group.raw),
&[]
);
}
}
}
}


@@ -1,10 +1,10 @@
use crate::{back, binding_model, command, conv, pipeline, resource, swap_chain};
use crate::registry::{HUB, Items};
use crate::track::{BufferTracker, TextureTracker};
use crate::track::{BufferTracker, TextureTracker, TrackPermit};
use crate::{
CommandBuffer, LifeGuard, RefCount, Stored, SubmissionIndex, WeaklyStored,
TextureUsageFlags,
BindGroupLayoutId, BlendStateId, BufferId, CommandBufferId, DepthStencilStateId,
LifeGuard, RefCount, Stored, SubmissionIndex, WeaklyStored,
BindGroupLayoutId, BindGroupId,
BlendStateId, BufferId, CommandBufferId, DepthStencilStateId,
AdapterId, DeviceId, PipelineLayoutId, QueueId, RenderPipelineId, ShaderModuleId,
TextureId, TextureViewId,
SurfaceId, SwapChainId,
@@ -12,11 +12,15 @@ use crate::{
use hal::command::RawCommandBuffer;
use hal::queue::RawCommandQueue;
use hal::{self, Device as _Device, Surface as _Surface};
use hal::{self,
DescriptorPool as _DescriptorPool,
Device as _Device,
Surface as _Surface,
};
//use rendy_memory::{allocator, Config, Heaps};
use parking_lot::{Mutex};
use std::{ffi, slice};
use std::{ffi, iter, slice};
use std::collections::hash_map::{Entry, HashMap};
use std::sync::atomic::Ordering;
@@ -150,7 +154,7 @@ pub struct Device<B: hal::Backend> {
mem_props: hal::MemoryProperties,
pub(crate) render_passes: Mutex<HashMap<RenderPassKey, B::RenderPass>>,
pub(crate) framebuffers: Mutex<HashMap<FramebufferKey, B::Framebuffer>>,
last_submission_index: SubmissionIndex,
desc_pool: Mutex<B::DescriptorPool>,
destroyed: Mutex<DestroyedResources<B>>,
}
@@ -182,6 +186,33 @@ impl<B: hal::Backend> Device<B> {
)
};*/
//TODO: generic allocator for descriptors
let desc_pool = Mutex::new(
unsafe {
raw.create_descriptor_pool(
100,
&[
hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Sampler,
count: 100,
},
hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::SampledImage,
count: 100,
},
hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::UniformBuffer,
count: 100,
},
hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::StorageBuffer,
count: 100,
},
],
)
}.unwrap()
);
Device {
raw,
adapter_id,
@@ -194,7 +225,7 @@ impl<B: hal::Backend> Device<B> {
mem_props,
render_passes: Mutex::new(HashMap::new()),
framebuffers: Mutex::new(HashMap::new()),
last_submission_index: 0,
desc_pool,
destroyed: Mutex::new(DestroyedResources {
referenced: Vec::new(),
active: Vec::new(),
@@ -208,6 +239,82 @@ pub(crate) struct ShaderModule<B: hal::Backend> {
pub raw: B::ShaderModule,
}
#[no_mangle]
pub extern "C" fn wgpu_device_create_buffer(
device_id: DeviceId,
desc: &resource::BufferDescriptor,
) -> BufferId {
let device_guard = HUB.devices.read();
let device = &device_guard.get(device_id);
let (usage, _) = conv::map_buffer_usage(desc.usage);
let mut buffer = unsafe {
device.raw.create_buffer(desc.size as u64, usage).unwrap()
};
let requirements = unsafe {
device.raw.get_buffer_requirements(&buffer)
};
let device_type = device
.mem_props
.memory_types
.iter()
.enumerate()
.position(|(id, memory_type)| {
// TODO
requirements.type_mask & (1 << id) != 0
&& memory_type
.properties
.contains(hal::memory::Properties::DEVICE_LOCAL)
})
.unwrap()
.into();
// TODO: allocate with rendy
let memory = unsafe {
device.raw
.allocate_memory(device_type, requirements.size)
.unwrap()
};
unsafe {
device.raw
.bind_buffer_memory(&memory, 0, &mut buffer)
.unwrap()
};
let life_guard = LifeGuard::new();
let ref_count = life_guard.ref_count.clone();
let id = HUB.buffers
.write()
.register(resource::Buffer {
raw: buffer,
device_id: Stored {
value: device_id,
ref_count: device.life_guard.ref_count.clone(),
},
life_guard,
});
let query = device.buffer_tracker
.lock()
.query(
&Stored { value: id, ref_count },
resource::BufferUsageFlags::WRITE_ALL,
);
assert!(query.initialized);
id
}
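
The memory-type search in `wgpu_device_create_buffer` (and in `wgpu_device_create_texture` below) reduces to: pick the first memory type whose bit is set in the requirements' `type_mask` and whose properties include `DEVICE_LOCAL` — the `// TODO` marks the missing host-visibility handling. A hal-free toy version of the same selection:

```rust
// Pick the first memory type allowed by `type_mask` that is device-local.
fn pick_memory_type(type_mask: u64, device_local: &[bool]) -> Option<usize> {
    device_local
        .iter()
        .enumerate()
        .position(|(id, &is_local)| type_mask & (1 << id) != 0 && is_local)
}

fn main() {
    // Mask 0b0110 allows types 1 and 2; only type 2 is device-local.
    assert_eq!(pick_memory_type(0b0110, &[false, false, true, false]), Some(2));
}
```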
#[no_mangle]
pub extern "C" fn wgpu_buffer_destroy(buffer_id: BufferId) {
let buffer_guard = HUB.buffers.read();
let buffer = buffer_guard.get(buffer_id);
HUB.devices
.read()
.get(buffer.device_id.value)
.destroyed
.lock()
.add(ResourceId::Buffer(buffer_id), &buffer.life_guard);
}
#[no_mangle]
pub extern "C" fn wgpu_device_create_texture(
device_id: DeviceId,
@@ -219,7 +326,8 @@ pub extern "C" fn wgpu_device_create_texture(
let usage = conv::map_texture_usage(desc.usage, aspects);
let device_guard = HUB.devices.read();
let device = &device_guard.get(device_id);
let mut image_unbound = unsafe {
let mut image = unsafe {
device.raw.create_image(
kind,
1, // TODO: mips
@@ -230,7 +338,9 @@ pub extern "C" fn wgpu_device_create_texture(
)
}
.unwrap();
let image_req = unsafe { device.raw.get_image_requirements(&image_unbound) };
let requirements = unsafe {
device.raw.get_image_requirements(&image)
};
let device_type = device
.mem_props
.memory_types
@@ -238,7 +348,7 @@ pub extern "C" fn wgpu_device_create_texture(
.enumerate()
.position(|(id, memory_type)| {
// TODO
image_req.type_mask & (1 << id) != 0
requirements.type_mask & (1 << id) != 0
&& memory_type
.properties
.contains(hal::memory::Properties::DEVICE_LOCAL)
@@ -246,14 +356,16 @@ pub extern "C" fn wgpu_device_create_texture(
.unwrap()
.into();
// TODO: allocate with rendy
let image_memory = unsafe { device.raw.allocate_memory(device_type, image_req.size) }.unwrap();
let memory = unsafe {
device.raw
.allocate_memory(device_type, requirements.size)
.unwrap()
};
unsafe {
device
.raw
.bind_image_memory(&image_memory, 0, &mut image_unbound)
}
.unwrap();
let bound_image = image_unbound; //TODO: Maybe call this image the same way in the first place
device.raw
.bind_image_memory(&memory, 0, &mut image)
.unwrap()
};
let full_range = hal::image::SubresourceRange {
aspects,
@@ -266,7 +378,7 @@ pub extern "C" fn wgpu_device_create_texture(
let id = HUB.textures
.write()
.register(resource::Texture {
raw: bound_image,
raw: image,
device_id: Stored {
value: device_id,
ref_count: device.life_guard.ref_count.clone(),
@@ -281,7 +393,7 @@
.lock()
.query(
&Stored { value: id, ref_count },
TextureUsageFlags::WRITE_ALL,
resource::TextureUsageFlags::WRITE_ALL,
);
assert!(query.initialized);
@@ -377,8 +489,8 @@ pub extern "C" fn wgpu_texture_create_default_texture_view(texture_id: TextureId
pub extern "C" fn wgpu_texture_destroy(texture_id: TextureId) {
let texture_guard = HUB.textures.read();
let texture = texture_guard.get(texture_id);
let device_guard = HUB.devices.write();
device_guard
HUB.devices
.read()
.get(texture.device_id.value)
.destroyed
.lock()
@@ -429,10 +541,11 @@ pub extern "C" fn wgpu_device_create_pipeline_layout(
device_id: DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
) -> PipelineLayoutId {
let bind_group_layouts =
unsafe { slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length) };
let bind_group_layout_ids = unsafe {
slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length)
};
let bind_group_layout_guard = HUB.bind_group_layouts.read();
let descriptor_set_layouts = bind_group_layouts
let descriptor_set_layouts = bind_group_layout_ids
.iter()
.map(|&id| &bind_group_layout_guard.get(id).raw);
@@ -450,6 +563,94 @@
.write()
.register(binding_model::PipelineLayout {
raw: pipeline_layout,
bind_group_layout_ids: bind_group_layout_ids
.iter()
.cloned()
.map(WeaklyStored)
.collect(),
})
}
#[no_mangle]
pub extern "C" fn wgpu_device_create_bind_group(
device_id: DeviceId,
desc: &binding_model::BindGroupDescriptor,
) -> BindGroupId {
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let bind_group_layout_guard = HUB.bind_group_layouts.read();
let bind_group_layout = bind_group_layout_guard.get(desc.layout);
let bindings = unsafe {
slice::from_raw_parts(desc.bindings, desc.bindings_length as usize)
};
let mut desc_pool = device.desc_pool.lock();
let desc_set = unsafe {
desc_pool
.allocate_set(&bind_group_layout.raw)
.unwrap()
};
let buffer_guard = HUB.buffers.read();
let sampler_guard = HUB.samplers.read();
let texture_view_guard = HUB.texture_views.read();
//TODO: group writes into contiguous sections
let mut writes = Vec::new();
let mut used_buffers = BufferTracker::new();
let mut used_textures = TextureTracker::new();
for b in bindings {
let descriptor = match b.resource {
binding_model::BindingResource::Buffer(ref bb) => {
let (buffer, _) = used_buffers
.get_with_usage(
&*buffer_guard,
bb.buffer,
resource::BufferUsageFlags::UNIFORM,
TrackPermit::EXTEND,
)
.unwrap();
let range = Some(bb.offset as u64) .. Some((bb.offset + bb.size) as u64);
hal::pso::Descriptor::Buffer(&buffer.raw, range)
}
binding_model::BindingResource::Sampler(id) => {
let sampler = sampler_guard.get(id);
hal::pso::Descriptor::Sampler(&sampler.raw)
}
binding_model::BindingResource::TextureView(id) => {
let view = texture_view_guard.get(id);
used_textures
.transit(
view.texture_id.value,
&view.texture_id.ref_count,
resource::TextureUsageFlags::SAMPLED,
TrackPermit::EXTEND,
)
.unwrap();
hal::pso::Descriptor::Image(&view.raw, hal::image::Layout::ShaderReadOnlyOptimal)
}
};
let write = hal::pso::DescriptorSetWrite {
set: &desc_set,
binding: b.binding,
array_offset: 0, //TODO
descriptors: iter::once(descriptor),
};
writes.push(write);
}
unsafe {
device.raw.write_descriptor_sets(writes);
}
HUB.bind_groups
.write()
.register(binding_model::BindGroup {
raw: desc_set,
layout_id: WeaklyStored(desc.layout),
life_guard: LifeGuard::new(),
used_buffers,
used_textures,
})
}
@@ -581,10 +782,10 @@ pub extern "C" fn wgpu_queue_submit(
);
}
//TODO: fix the consume
CommandBuffer::insert_barriers(
command::CommandBuffer::insert_barriers(
&mut transit,
buffer_tracker.consume(&comb.buffer_tracker),
texture_tracker.consume(&comb.texture_tracker),
buffer_tracker.consume_by_replace(&comb.buffer_tracker),
texture_tracker.consume_by_replace(&comb.texture_tracker),
&*buffer_guard,
&*texture_guard,
);
@@ -641,12 +842,12 @@ pub extern "C" fn wgpu_queue_submit(
last_done
};
device.com_allocator.maintain(&device.raw, last_done);
device.com_allocator.maintain(last_done);
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
let cmd_buf = command_buffer_guard.take(cmb_id);
device.com_allocator.submit(cmd_buf);
device.com_allocator.after_submit(cmd_buf);
}
}
@@ -655,12 +856,6 @@ pub extern "C" fn wgpu_device_create_render_pipeline(
device_id: DeviceId,
desc: &pipeline::RenderPipelineDescriptor,
) -> RenderPipelineId {
// TODO
let extent = hal::window::Extent2D {
width: 100,
height: 100,
};
let device_guard = HUB.devices.read();
let device = device_guard.get(device_id);
let pipeline_layout_guard = HUB.pipeline_layouts.read();
@@ -814,21 +1009,8 @@
// TODO
let baked_states = hal::pso::BakedStates {
viewport: Some(hal::pso::Viewport {
rect: hal::pso::Rect {
x: 0,
y: 0,
w: extent.width as i16,
h: extent.height as i16,
},
depth: (0.0..1.0),
}),
scissor: Some(hal::pso::Rect {
x: 0,
y: 0,
w: extent.width as i16,
h: extent.height as i16,
}),
viewport: None,
scissor: None,
blend_color: None,
depth_bounds: None,
};
@@ -864,7 +1046,10 @@
HUB.render_pipelines
.write()
.register(pipeline::RenderPipeline { raw: pipeline })
.register(pipeline::RenderPipeline {
raw: pipeline,
layout_id: WeaklyStored(desc.layout),
})
}
#[no_mangle]
@@ -987,7 +1172,7 @@ pub extern "C" fn wgpu_device_create_swap_chain(
};
device.texture_tracker
.lock()
.query(&texture_id, TextureUsageFlags::WRITE_ALL);
.query(&texture_id, resource::TextureUsageFlags::WRITE_ALL);
let view = resource::TextureView {
raw: view_raw,


@@ -194,6 +194,7 @@ type TextureViewHandle = TextureView<B>;
pub type TextureId = Id;
type TextureHandle = Texture<B>;
pub type SamplerId = Id;
type SamplerHandle = Sampler<B>;
// Binding model
pub type BindGroupLayoutId = Id;


@@ -1,5 +1,8 @@
use crate::resource;
use crate::{BlendStateId, ByteArray, DepthStencilStateId, PipelineLayoutId, ShaderModuleId};
use crate::{
ByteArray, WeaklyStored,
BlendStateId, DepthStencilStateId, PipelineLayoutId, ShaderModuleId,
};
use bitflags::bitflags;
@@ -212,6 +215,7 @@ pub struct ComputePipelineDescriptor {
pub(crate) struct ComputePipeline<B: hal::Backend> {
pub raw: B::ComputePipeline,
pub layout_id: WeaklyStored<PipelineLayoutId>,
}
#[repr(C)]
@@ -251,4 +255,5 @@ pub struct RenderPipelineDescriptor {
pub(crate) struct RenderPipeline<B: hal::Backend> {
pub raw: B::GraphicsPipeline,
pub layout_id: WeaklyStored<PipelineLayoutId>,
}


@@ -3,7 +3,7 @@ use crate::{
BlendStateHandle, CommandBufferHandle, DepthStencilStateHandle, DeviceHandle, InstanceHandle,
RenderPassHandle, ComputePassHandle,
PipelineLayoutHandle, RenderPipelineHandle, ComputePipelineHandle, ShaderModuleHandle,
BufferHandle, TextureHandle, TextureViewHandle,
BufferHandle, SamplerHandle, TextureHandle, TextureViewHandle,
SurfaceHandle, SwapChainHandle,
};
@@ -51,6 +51,7 @@ pub struct Hub {
pub(crate) buffers: ConcreteRegistry<BufferHandle>,
pub(crate) textures: ConcreteRegistry<TextureHandle>,
pub(crate) texture_views: ConcreteRegistry<TextureViewHandle>,
pub(crate) samplers: ConcreteRegistry<SamplerHandle>,
pub(crate) surfaces: ConcreteRegistry<SurfaceHandle>,
pub(crate) swap_chains: ConcreteRegistry<SwapChainHandle>,
}


@@ -1,5 +1,5 @@
use crate::{
Extent3d, LifeGuard, Stored,
Extent3d, LifeGuard, RefCount, Stored,
DeviceId, TextureId,
};
use crate::swap_chain::{SwapChainLink, SwapImageEpoch};
@@ -8,6 +8,8 @@ use bitflags::bitflags;
use hal;
use parking_lot::Mutex;
use std::borrow::Borrow;
bitflags! {
#[repr(transparent)]
@@ -32,13 +34,19 @@ pub struct BufferDescriptor {
}
pub(crate) struct Buffer<B: hal::Backend> {
//pub raw: B::UnboundBuffer,
pub raw: B::Buffer,
pub memory_properties: hal::memory::Properties,
pub device_id: Stored<DeviceId>,
//pub memory_properties: hal::memory::Properties,
pub life_guard: LifeGuard,
// TODO: mapping, unmap()
}
impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
}
}
#[repr(C)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub enum TextureDimension {
@@ -89,6 +97,12 @@ pub(crate) struct Texture<B: hal::Backend> {
pub life_guard: LifeGuard,
}
impl<B: hal::Backend> Borrow<RefCount> for Texture<B> {
fn borrow(&self) -> &RefCount {
&self.life_guard.ref_count
}
}
bitflags! {
#[repr(transparent)]
pub struct TextureAspectFlags: u32 {
@@ -182,6 +196,6 @@ pub struct SamplerDescriptor {
pub border_color: BorderColor,
}
pub struct Sampler {
// TODO
pub(crate) struct Sampler<B: hal::Backend> {
pub raw: B::Sampler,
}


@@ -1,6 +1,8 @@
use crate::registry::{Id, Items};
use crate::resource::{BufferUsageFlags, TextureUsageFlags};
use crate::{BufferId, RefCount, Stored, TextureId, WeaklyStored};
use std::borrow::Borrow;
use std::collections::hash_map::{Entry, HashMap};
use std::hash::Hash;
use std::mem;
@@ -26,7 +28,12 @@ pub struct Query<T> {
bitflags! {
pub struct TrackPermit: u32 {
/// Allow extension of the current usage. This is useful during render pass
/// recording, where the usage has to stay constant, but we can defer the
/// decision on what it is until the end of the pass.
const EXTEND = 1;
/// Allow replacing the current usage with the new one. This is useful when
/// recording a command buffer live, where the current usage has already been set.
const REPLACE = 2;
}
}
@@ -63,7 +70,7 @@ pub type BufferTracker = Tracker<BufferId, BufferUsageFlags>;
pub type TextureTracker = Tracker<TextureId, TextureUsageFlags>;
impl<I: Clone + Hash + Eq, U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<I, U> {
pub(crate) fn new() -> Self {
pub fn new() -> Self {
Tracker {
map: HashMap::new(),
}
@@ -125,7 +132,7 @@ impl<I: Clone + Hash + Eq, U: Copy + GenericUsage + BitOr<Output = U> + PartialE
}
/// Consume another tracker, adding its transitions to `self`.
pub fn consume<'a>(&'a mut self, other: &'a Self) -> impl 'a + Iterator<Item = (I, Range<U>)> {
pub fn consume_by_replace<'a>(&'a mut self, other: &'a Self) -> impl 'a + Iterator<Item = (I, Range<U>)> {
other.map.iter().flat_map(move |(id, new)| {
match self.map.entry(WeaklyStored(id.0.clone())) {
Entry::Vacant(e) => {
@@ -144,8 +151,43 @@ impl<I: Clone + Hash + Eq, U: Copy + GenericUsage + BitOr<Output = U> + PartialE
})
}
pub fn consume_by_extend<'a>(&'a mut self, other: &'a Self) -> Result<(), (I, Range<U>)> {
for (id, new) in other.map.iter() {
match self.map.entry(WeaklyStored(id.0.clone())) {
Entry::Vacant(e) => {
e.insert(new.clone());
}
Entry::Occupied(mut e) => {
let old = e.get().last;
if old != new.last {
let extended = old | new.last;
if extended.is_exclusive() {
return Err((id.0.clone(), old..new.last));
}
e.get_mut().last = extended;
}
}
}
}
Ok(())
}
/// Return an iterator over used resource keys.
pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = I> {
self.map.keys().map(|&WeaklyStored(ref id)| id.clone())
}
}
impl<U: Copy + GenericUsage + BitOr<Output = U> + PartialEq> Tracker<Id, U> {
pub(crate) fn get_with_usage<'a, T: 'a + Borrow<RefCount>, V: Items<T>>(
&mut self,
items: &'a V,
id: Id,
usage: U,
permit: TrackPermit,
) -> Result<(&'a T, Tracktion<U>), U> {
let item = items.get(id);
self.transit(id, item.borrow(), usage, permit)
.map(|tracktion| (item, tracktion))
}
}
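
To make the two permits concrete: `REPLACE` records a transition `old..new` that `consume_by_replace` later turns into a barrier, while `EXTEND` ORs usages together and fails if the combination would be exclusive. A toy model with a stand-in exclusivity rule (the real one comes from the crate's `GenericUsage::is_exclusive`):

```rust
const READ: u32 = 0b01;
const WRITE: u32 = 0b10;

// Stand-in exclusivity rule: a write mixed with any other usage conflicts.
fn is_exclusive(usage: u32) -> bool {
    usage & WRITE != 0 && usage != WRITE
}

// EXTEND: merge usages, as render-pass set_bind_group does via consume_by_extend.
fn extend(old: u32, new: u32) -> Result<u32, (u32, u32)> {
    let combined = old | new;
    if is_exclusive(combined) {
        Err((old, new)) // caller must insert a barrier or fail validation
    } else {
        Ok(combined)
    }
}

fn main() {
    assert_eq!(extend(READ, READ), Ok(READ)); // reads merge freely
    assert!(extend(READ, WRITE).is_err());    // read + write is exclusive
}
```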


@@ -4,6 +4,7 @@ extern crate wgpu_native as wgn;
use arrayvec::ArrayVec;
use std::ffi::CString;
use std::ops::Range;
use std::ptr;
pub use wgn::{
@@ -13,7 +14,7 @@ pub use wgn::{
RenderPassColorAttachmentDescriptor, RenderPassDepthStencilAttachmentDescriptor,
ShaderModuleDescriptor, ShaderStage, ShaderStageFlags, StoreOp,
TextureDescriptor, TextureDimension, TextureFormat, TextureUsageFlags, TextureViewDescriptor,
SwapChainDescriptor,
BufferDescriptor, SwapChainDescriptor,
};
pub struct Instance {
@@ -28,6 +29,10 @@ pub struct Device {
id: wgn::DeviceId,
}
pub struct Buffer {
id: wgn::BufferId,
}
pub struct Texture {
id: wgn::TextureId,
}
@@ -272,9 +277,15 @@ impl Device {
}
}
pub fn create_buffer(&self, desc: &BufferDescriptor) -> Buffer {
Buffer {
id: wgn::wgpu_device_create_buffer(self.id, desc),
}
}
pub fn create_texture(&self, desc: &TextureDescriptor) -> Texture {
Texture {
id: wgn::wgpu_device_create_texture(self.id, &desc),
id: wgn::wgpu_device_create_texture(self.id, desc),
}
}
@@ -288,7 +299,7 @@ impl Device {
impl Texture {
pub fn create_texture_view(&self, desc: &TextureViewDescriptor) -> TextureView {
TextureView {
id: wgn::wgpu_texture_create_texture_view(self.id, &desc),
id: wgn::wgpu_texture_create_texture_view(self.id, desc),
}
}
@@ -353,6 +364,39 @@ impl<'a> RenderPass<'a> {
wgn::wgpu_render_pass_end_pass(self.id);
self.parent
}
pub fn set_bind_group(&mut self, index: u32, bind_group: &BindGroup) {
wgn::wgpu_render_pass_set_bind_group(self.id, index, bind_group.id);
}
pub fn set_pipeline(&mut self, pipeline: &RenderPipeline) {
wgn::wgpu_render_pass_set_pipeline(self.id, pipeline.id);
}
pub fn draw(
&mut self, vertices: Range<u32>, instances: Range<u32>
) {
wgn::wgpu_render_pass_draw(
self.id,
vertices.end - vertices.start,
instances.end - instances.start,
vertices.start,
instances.start,
);
}
pub fn draw_indexed(
&mut self, indices: Range<u32>, base_vertex: i32, instances: Range<u32>
) {
wgn::wgpu_render_pass_draw_indexed(
self.id,
indices.end - indices.start,
instances.end - instances.start,
indices.start,
base_vertex,
instances.start,
);
}
}
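
The `draw`/`draw_indexed` wrappers above only convert Rust ranges into the C ABI's (count, first) pairs. A standalone check of that arithmetic:

```rust
use std::ops::Range;

// draw(vertices, instances) forwards (end - start) as the count and `start`
// as the first element, per the wrappers above.
fn count_and_first(r: Range<u32>) -> (u32, u32) {
    (r.end - r.start, r.start)
}

fn main() {
    assert_eq!(count_and_first(0..3), (3, 0)); // rpass.draw(0..3, 0..1)
    assert_eq!(count_and_first(2..5), (3, 2)); // three items starting at 2
}
```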
impl<'a> ComputePass<'a> {