18: Compute resource binding r=grovesNL a=kvark

Again, this isn't complete/usable yet; it's blocked by https://github.com/gpuweb/gpuweb/pull/93.
But we can merge and move forward.

Co-authored-by: Dzmitry Malyshau <kvark@mozilla.com>
bors[bot] 2018-10-03 11:29:47 +00:00
commit 1855e22d0c
9 changed files with 134 additions and 46 deletions

@@ -108,6 +108,8 @@ typedef enum {
WGPUTextureFormat_D32FloatS8Uint = 3,
} WGPUTextureFormat;
typedef struct WGPURenderPassDescriptor_WGPUTextureViewId WGPURenderPassDescriptor_WGPUTextureViewId;
typedef WGPUId WGPUDeviceId;
typedef WGPUId WGPUAdapterId;
@@ -122,10 +124,10 @@ typedef struct {
typedef WGPUId WGPUComputePassId;
typedef WGPUId WGPURenderPassId;
typedef WGPUId WGPUCommandBufferId;
typedef WGPUId WGPURenderPassId;
typedef WGPUId WGPUInstanceId;
typedef WGPUId WGPUAttachmentStateId;
@@ -234,9 +236,12 @@ typedef struct {
WGPUDeviceId wgpu_adapter_create_device(WGPUAdapterId adapter_id, WGPUDeviceDescriptor _desc);
WGPUComputePassId wgpu_command_buffer_begin_compute_pass(void);
WGPUComputePassId wgpu_command_buffer_begin_compute_pass(WGPUCommandBufferId command_buffer_id);
WGPURenderPassId wgpu_command_buffer_begin_render_pass(WGPUCommandBufferId _command_buffer);
WGPURenderPassId wgpu_command_buffer_begin_render_pass(WGPUCommandBufferId command_buffer_id,
WGPURenderPassDescriptor_WGPUTextureViewId _descriptor);
WGPUCommandBufferId wgpu_compute_pass_end_pass(WGPUComputePassId pass_id);
WGPUInstanceId wgpu_create_instance(void);
@@ -252,7 +257,7 @@ WGPUBlendStateId wgpu_device_create_blend_state(WGPUDeviceId _device_id,
WGPUCommandBufferId wgpu_device_create_command_buffer(WGPUDeviceId device_id,
WGPUCommandBufferDescriptor _desc);
WGPUDepthStencilStateId wgpu_device_create_depth_stencil_state(WGPUDeviceId device_id,
WGPUDepthStencilStateId wgpu_device_create_depth_stencil_state(WGPUDeviceId _device_id,
WGPUDepthStencilStateDescriptor desc);
WGPUPipelineLayoutId wgpu_device_create_pipeline_layout(WGPUDeviceId device_id,
@@ -271,3 +276,5 @@ WGPUAdapterId wgpu_instance_get_adapter(WGPUInstanceId instance_id, WGPUAdapterD
void wgpu_queue_submit(WGPUQueueId queue_id,
const WGPUCommandBufferId *command_buffer_ptr,
uintptr_t command_buffer_count);
WGPUCommandBufferId wgpu_render_pass_end_pass(WGPURenderPassId pass_id);

@@ -76,6 +76,6 @@ pub struct BindGroupDescriptor {
pub bindings_length: usize,
}
pub struct BindGroup {
// TODO
pub(crate) struct BindGroup<B: hal::Backend> {
pub raw: B::DescriptorSet,
}

@@ -1,11 +1,14 @@
use hal;
use registry::{HUB, Items, Registry};
use {
Stored,
CommandBufferId, ComputePassId
BindGroupId, CommandBufferId, ComputePassId, ComputePipelineId,
};
use hal;
use hal::command::RawCommandBuffer;
use std::iter;
pub struct ComputePass<B: hal::Backend> {
raw: B::CommandBuffer,
@@ -35,3 +38,41 @@ pub extern "C" fn wgpu_compute_pass_end_pass(
.raw = Some(pass.raw);
pass.cmb_id.0
}
pub extern "C" fn wgpu_compute_pass_set_bind_group(
pass_id: ComputePassId, index: u32, bind_group_id: BindGroupId,
) {
let bind_group_guard = HUB.bind_groups.lock();
let set = &bind_group_guard.get(bind_group_id).raw;
let layout = unimplemented!();
// see https://github.com/gpuweb/gpuweb/pull/93
HUB.compute_passes
.lock()
.get_mut(pass_id)
.raw
.bind_compute_descriptor_sets(layout, index as usize, iter::once(set), &[]);
}
pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass_id: ComputePassId, pipeline_id: ComputePipelineId,
) {
let pipeline_guard = HUB.compute_pipelines.lock();
let pipeline = &pipeline_guard.get(pipeline_id).raw;
HUB.compute_passes
.lock()
.get_mut(pass_id)
.raw
.bind_compute_pipeline(pipeline);
}
pub extern "C" fn wgpu_compute_pass_dispatch(
pass_id: ComputePassId, x: u32, y: u32, z: u32,
) {
HUB.compute_passes
.lock()
.get_mut(pass_id)
.raw
.dispatch([x, y, z]);
}

@@ -1,12 +1,12 @@
use hal;
use hal::command::RawCommandBuffer;
use registry::{HUB, Items, Registry};
use {
Stored,
CommandBufferId, RenderPassId,
};
use hal;
use hal::command::RawCommandBuffer;
pub struct RenderPass<B: hal::Backend> {
raw: B::CommandBuffer,

@@ -44,20 +44,24 @@ pub extern "C" fn wgpu_device_create_bind_group_layout(
desc: binding_model::BindGroupLayoutDescriptor,
) -> BindGroupLayoutId {
let bindings = unsafe { slice::from_raw_parts(desc.bindings, desc.bindings_length) };
let device_guard = HUB.devices.lock();
let device = device_guard.get(device_id);
let descriptor_set_layout = device.raw.create_descriptor_set_layout(
bindings.iter().map(|binding| {
hal::pso::DescriptorSetLayoutBinding {
binding: binding.binding,
ty: conv::map_binding_type(&binding.ty),
count: bindings.len(),
stage_flags: conv::map_shader_stage_flags(binding.visibility),
immutable_samplers: false, // TODO
}
}),
&[],
);
let descriptor_set_layout = HUB.devices
.lock()
.get(device_id)
.raw
.create_descriptor_set_layout(
bindings.iter().map(|binding| {
hal::pso::DescriptorSetLayoutBinding {
binding: binding.binding,
ty: conv::map_binding_type(&binding.ty),
count: bindings.len(),
stage_flags: conv::map_shader_stage_flags(binding.visibility),
immutable_samplers: false, // TODO
}
}),
&[],
);
HUB.bind_group_layouts
.lock()
.register(binding_model::BindGroupLayout {
@@ -70,16 +74,21 @@ pub extern "C" fn wgpu_device_create_pipeline_layout(
device_id: DeviceId,
desc: binding_model::PipelineLayoutDescriptor,
) -> PipelineLayoutId {
let bind_group_layouts = unsafe {
slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length)
};
let bind_group_layout_guard = HUB.bind_group_layouts.lock();
let descriptor_set_layouts =
unsafe { slice::from_raw_parts(desc.bind_group_layouts, desc.bind_group_layouts_length) }
.iter()
.map(|id| bind_group_layout_guard.get(id.clone()))
.collect::<Vec<_>>();
let device_guard = HUB.devices.lock();
let device = &device_guard.get(device_id).raw;
let pipeline_layout =
device.create_pipeline_layout(descriptor_set_layouts.iter().map(|d| &d.raw), &[]); // TODO: push constants
let descriptor_set_layouts = bind_group_layouts
.iter()
.map(|&id| &bind_group_layout_guard.get(id).raw);
// TODO: push constants
let pipeline_layout = HUB.devices
.lock()
.get(device_id)
.raw
.create_pipeline_layout(descriptor_set_layouts, &[]);
HUB.pipeline_layouts
.lock()
.register(binding_model::PipelineLayout {
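
From the caller's side this is just a pointer-and-length pair of previously created bind group layout ids. A minimal sketch of the call follows; the diff only shows the bind_group_layouts and bind_group_layouts_length fields, so assuming those are the descriptor's only fields and that the type is visible to the caller under the path used in this file.

// Hypothetical caller: one bind group layout feeding a pipeline layout.
// Assumes the descriptor has exactly the two fields read in the diff above.
let bind_group_layouts = [bind_group_layout_id];
let pipeline_layout_id = wgpu_device_create_pipeline_layout(
    device_id,
    binding_model::PipelineLayoutDescriptor {
        bind_group_layouts: bind_group_layouts.as_ptr(),
        bind_group_layouts_length: bind_group_layouts.len(),
    },
);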
@@ -116,11 +125,16 @@ pub extern "C" fn wgpu_device_create_shader_module(
device_id: DeviceId,
desc: pipeline::ShaderModuleDescriptor,
) -> ShaderModuleId {
let device_guard = HUB.devices.lock();
let device = &device_guard.get(device_id).raw;
let shader = device
.create_shader_module(unsafe { slice::from_raw_parts(desc.code.bytes, desc.code.length) })
let spv = unsafe {
slice::from_raw_parts(desc.code.bytes, desc.code.length)
};
let shader = HUB.devices
.lock()
.get(device_id)
.raw
.create_shader_module(spv)
.unwrap();
HUB.shader_modules
.lock()
.register(ShaderModule { raw: shader })
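
The same pattern applies to shader modules: the descriptor carries a raw pointer and a length for the SPIR-V bytes. A minimal sketch, where ByteArray is an assumed name for the inner { bytes, length } pair; the diff only shows desc.code.bytes and desc.code.length.

// Hypothetical caller: register a SPIR-V blob as a shader module.
// ByteArray is an assumed name for the { bytes, length } pair read above.
let spv: &[u8] = include_bytes!("shader.comp.spv");
let shader_module_id = wgpu_device_create_shader_module(
    device_id,
    pipeline::ShaderModuleDescriptor {
        code: ByteArray {
            bytes: spv.as_ptr(),
            length: spv.len(),
        },
    },
);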
@@ -133,6 +147,7 @@ pub extern "C" fn wgpu_device_create_command_buffer(
) -> CommandBufferId {
let mut device_guard = HUB.devices.lock();
let device = device_guard.get_mut(device_id);
let mut cmd_buf = device.com_allocator.allocate(device_id, &device.raw);
cmd_buf.raw.as_mut().unwrap().begin(
hal::command::CommandBufferFlags::ONE_TIME_SUBMIT,

@@ -106,6 +106,8 @@ pub type BindGroupLayoutId = Id;
type BindGroupLayoutHandle = BindGroupLayout<B>;
pub type PipelineLayoutId = Id;
type PipelineLayoutHandle = PipelineLayout<B>;
pub type BindGroupId = Id;
type BindGroupHandle = BindGroup<B>;
// Pipeline
pub type BlendStateId = Id;
@@ -117,9 +119,10 @@ pub type ShaderModuleId = Id;
type ShaderModuleHandle = ShaderModule<B>;
pub type AttachmentStateId = Id;
type AttachmentStateHandle = AttachmentState<B>;
pub type ComputePipelineId = Id;
pub type RenderPipelineId = Id;
type RenderPipelineHandle = RenderPipeline<B>;
pub type ComputePipelineId = Id;
type ComputePipelineHandle = ComputePipeline<B>;
pub type CommandBufferId = Id;
type CommandBufferHandle = CommandBuffer<B>;

@@ -227,8 +227,8 @@ pub struct ComputePipelineDescriptor {
pub stages: *const PipelineStageDescriptor,
}
pub struct ComputePipeline {
// TODO
pub(crate) struct ComputePipeline<B: hal::Backend> {
pub raw: B::ComputePipeline,
}
#[repr(C)]

@@ -9,10 +9,10 @@ pub use self::local::{Id, ItemsGuard, Registry as ConcreteRegistry};
pub use self::remote::{Id, ItemsGuard, Registry as ConcreteRegistry};
use {
AdapterHandle, AttachmentStateHandle, BindGroupLayoutHandle, BlendStateHandle,
CommandBufferHandle, DepthStencilStateHandle, DeviceHandle, InstanceHandle,
AdapterHandle, AttachmentStateHandle, BindGroupLayoutHandle, BindGroupHandle,
BlendStateHandle, CommandBufferHandle, DepthStencilStateHandle, DeviceHandle, InstanceHandle,
RenderPassHandle, ComputePassHandle,
PipelineLayoutHandle, RenderPipelineHandle, ShaderModuleHandle,
PipelineLayoutHandle, RenderPipelineHandle, ComputePipelineHandle, ShaderModuleHandle,
};
@@ -37,12 +37,14 @@ pub struct Hub {
pub(crate) devices: ConcreteRegistry<DeviceHandle>,
pub(crate) pipeline_layouts: ConcreteRegistry<PipelineLayoutHandle>,
pub(crate) bind_group_layouts: ConcreteRegistry<BindGroupLayoutHandle>,
pub(crate) bind_groups: ConcreteRegistry<BindGroupHandle>,
pub(crate) attachment_states: ConcreteRegistry<AttachmentStateHandle>,
pub(crate) blend_states: ConcreteRegistry<BlendStateHandle>,
pub(crate) depth_stencil_states: ConcreteRegistry<DepthStencilStateHandle>,
pub(crate) shader_modules: ConcreteRegistry<ShaderModuleHandle>,
pub(crate) command_buffers: ConcreteRegistry<CommandBufferHandle>,
pub(crate) render_pipelines: ConcreteRegistry<RenderPipelineHandle>,
pub(crate) compute_pipelines: ConcreteRegistry<ComputePipelineHandle>,
pub(crate) render_passes: ConcreteRegistry<RenderPassHandle>,
pub(crate) compute_passes: ConcreteRegistry<ComputePassHandle>,
}

@@ -35,6 +35,10 @@ pub struct BindGroupLayout {
id: wgn::BindGroupLayoutId,
}
pub struct BindGroup {
id: wgn::BindGroupId,
}
pub struct ShaderModule {
id: wgn::ShaderModuleId,
}
@@ -59,6 +63,10 @@ pub struct RenderPipeline {
id: wgn::RenderPipelineId,
}
pub struct ComputePipeline {
id: wgn::ComputePipelineId,
}
pub struct CommandBuffer {
id: wgn::CommandBufferId,
}
@@ -273,6 +281,18 @@ impl<'a> ComputePass<'a> {
wgn::wgpu_compute_pass_end_pass(self.id);
self.parent
}
pub fn set_bind_group(&mut self, index: u32, bind_group: &BindGroup) {
wgn::wgpu_compute_pass_set_bind_group(self.id, index, bind_group.id);
}
pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) {
wgn::wgpu_compute_pass_set_pipeline(self.id, pipeline.id);
}
pub fn dispatch(&mut self, x: u32, y: u32, z: u32) {
wgn::wgpu_compute_pass_dispatch(self.id, x, y, z);
}
}
impl Queue {
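
With the wrapper methods above, compute recording at the wgpu-rs level mirrors the render path. A minimal usage sketch follows, assuming a device, a ComputePipeline, and a BindGroup already exist, and that the surrounding command-buffer and queue calls keep the shapes implied by the native header (create_command_buffer, begin_compute_pass, get_queue, submit); everything outside this diff is an assumption.

// Hypothetical end-to-end flow; only set_pipeline, set_bind_group, dispatch,
// and end_pass come from this change. The other names (create_command_buffer,
// begin_compute_pass, get_queue, submit, the empty descriptor) are assumed.
let mut cmd_buf = device.create_command_buffer(&wgpu::CommandBufferDescriptor {});
{
    let mut pass = cmd_buf.begin_compute_pass();
    pass.set_pipeline(&pipeline);
    pass.set_bind_group(0, &bind_group);
    pass.dispatch(64, 1, 1);
    pass.end_pass();
}
device.get_queue().submit(&[cmd_buf]);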