update synchronization, fix last errors in wgc

This commit is contained in:
Dzmitry Malyshau 2021-06-08 01:19:18 -04:00
parent 0a82c232ba
commit ff2a3e84fc
16 changed files with 253 additions and 255 deletions

1
Cargo.lock generated
View File

@ -1891,6 +1891,7 @@ version = "0.1.0"
dependencies = [
"bitflags",
"naga",
"raw-window-handle",
"smallvec",
"thiserror",
"wgpu-types",

View File

@ -236,7 +236,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::TextureUse::COPY_DST,
)
.map_err(ClearError::InvalidTexture)?;
let dst_raw = dst_texture
let _dst_raw = dst_texture
.raw
.as_ref()
.ok_or(ClearError::InvalidTexture(dst))?;

View File

@ -10,7 +10,7 @@ use crate::{
PassErrorScope, QueryResetMap, QueryUseError, RenderCommand, RenderCommandError,
StateChange,
},
device::{AttachmentData, Device, RenderPassCompatibilityError, RenderPassContext},
device::{AttachmentData, RenderPassCompatibilityError, RenderPassContext},
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
@ -524,7 +524,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
color_attachments: &[RenderPassColorAttachment],
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>,
cmd_buf: &mut CommandBuffer<A>,
device: &Device<A>,
view_guard: &'a Storage<TextureView<A>, id::TextureViewId>,
) -> Result<Self, RenderPassErrorInner> {
profiling::scope!("start", "RenderPassInfo");
@ -539,7 +538,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
let mut attachment_type_name = "";
let mut extent = None;
let mut sample_count = 0;
let mut depth_stencil_aspects = hal::FormatAspect::empty();
let mut used_swap_chain = None::<Stored<id::SwapChainId>>;
let mut add_view = |view: &TextureView<A>, type_name| {
@ -576,8 +574,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.view))?;
add_view(view, "depth")?;
depth_stencil_aspects = view.desc.aspects();
if depth_stencil_aspects.contains(hal::FormatAspect::COLOR) {
let ds_aspects = view.desc.aspects();
if ds_aspects.contains(hal::FormatAspect::COLOR) {
return Err(RenderPassErrorInner::InvalidDepthStencilAttachmentFormat(
view.desc.format,
));
@ -595,7 +593,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
.trackers
.textures
.query(source_id.value, view.selector.clone());
let new_use = if at.is_read_only(depth_stencil_aspects)? {
let new_use = if at.is_read_only(ds_aspects)? {
is_ds_read_only = true;
hal::TextureUse::DEPTH_STENCIL_READ | hal::TextureUse::SAMPLED
} else {
@ -672,18 +670,18 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
.views
.use_extend(&*view_guard, resolve_target, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(resolve_target))?;
if extent != Some(resolve_view.extent) {
if color_view.extent != resolve_view.extent {
return Err(RenderPassErrorInner::AttachmentsDimensionMismatch {
previous: (attachment_type_name, extent.unwrap_or_default()),
mismatch: ("resolve", resolve_view.extent),
});
}
if color_view.samples == 1 {
return Err(RenderPassErrorInner::InvalidResolveSourceSampleCount);
}
if resolve_view.samples != 1 {
return Err(RenderPassErrorInner::InvalidResolveTargetSampleCount);
}
if sample_count == 1 {
return Err(RenderPassErrorInner::InvalidResolveSourceSampleCount);
}
let boundary_usage = match resolve_view.source {
TextureViewSource::Native(ref source_id) => {
@ -702,7 +700,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
let old_use = previous_use.unwrap_or(new_use);
old_use..new_use
}
TextureViewSource::SwapChain(source_id) => {
TextureViewSource::SwapChain(ref source_id) => {
assert!(used_swap_chain.is_none());
used_swap_chain = Some(source_id.clone());
hal::TextureUse::UNINITIALIZED..hal::TextureUse::empty()
@ -867,10 +865,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let device = &device_guard[cmd_buf.device_id.value];
let mut raw = device
.raw
.create_command_buffer(&hal::CommandBufferDescriptor { label: base.label })
.unwrap(); //TODO: handle this better
let mut raw = unsafe {
device
.raw
.create_command_buffer(&hal::CommandBufferDescriptor { label: base.label })
.unwrap() //TODO: handle this better
};
let (bundle_guard, mut token) = hub.render_bundles.read(&mut token);
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
@ -892,7 +892,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
color_attachments,
depth_stencil_attachment,
cmd_buf,
device,
&*view_guard,
)
.map_pass_err(scope)?;

View File

@ -538,7 +538,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}),
);
let (block_width, _) = format_desc.block_dimensions;
if !conv::is_valid_copy_dst_texture_format(dst_texture.desc.format) {
return Err(
TransferError::CopyToForbiddenTextureFormat(dst_texture.desc.format).into(),
@ -653,7 +652,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
true,
)?;
let (block_width, _) = format_desc.block_dimensions;
if !conv::is_valid_copy_src_texture_format(src_texture.desc.format) {
return Err(
TransferError::CopyFromForbiddenTextureFormat(src_texture.desc.format).into(),

View File

@ -106,10 +106,8 @@ pub fn check_texture_dimension_size(
limits: &wgt::Limits,
) -> Result<(), resource::TextureDimensionError> {
use resource::{TextureDimensionError as Tde, TextureErrorDimension as Ted};
use std::convert::TryInto;
use wgt::TextureDimension::*;
let layers = depth_or_array_layers.try_into().unwrap_or(!0);
let (extent_limits, sample_limit) = match dimension {
D1 => (
[
@ -155,41 +153,3 @@ pub fn check_texture_dimension_size(
Ok(())
}
pub fn map_color_f32(color: &wgt::Color) -> [f32; 4] {
[
color.r as f32,
color.g as f32,
color.b as f32,
color.a as f32,
]
}
pub fn map_color_i32(color: &wgt::Color) -> [i32; 4] {
[
color.r as i32,
color.g as i32,
color.b as i32,
color.a as i32,
]
}
pub fn map_color_u32(color: &wgt::Color) -> [u32; 4] {
[
color.r as u32,
color.g as u32,
color.b as u32,
color.a as u32,
]
}
/// Take `value` and round it up to the nearest alignment `alignment`.
///
/// ```text
/// (0, 3) -> 0
/// (1, 3) -> 3
/// (2, 3) -> 3
/// (3, 3) -> 3
/// (4, 3) -> 6
/// ...
/// ```
///
/// `alignment` must be non-zero (the division below panics otherwise).
/// NOTE(review): `value + alignment - 1` can overflow `u32` when `value`
/// is within `alignment` of `u32::MAX` — callers are presumed to stay
/// well below that; confirm if large values are possible.
pub fn align_up(value: u32, alignment: u32) -> u32 {
    // Adding `alignment - 1` pushes any non-multiple past the next
    // boundary; the truncating integer division then lands exactly on it.
    ((value + alignment - 1) / alignment) * alignment
}

View File

@ -19,8 +19,6 @@ use thiserror::Error;
use std::sync::atomic::Ordering;
const CLEANUP_WAIT_MS: u32 = 5000;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
pub(super) struct SuspectedResources {
@ -165,7 +163,6 @@ impl<A: hal::Api> NonReferencedResources<A> {
struct ActiveSubmission<A: hal::Api> {
index: SubmissionIndex,
fence: A::Fence,
last_resources: NonReferencedResources<A>,
mapped: Vec<id::Valid<id::BufferId>>,
}
@ -222,7 +219,6 @@ impl<A: hal::Api> LifetimeTracker<A> {
pub fn track_submission(
&mut self,
index: SubmissionIndex,
fence: A::Fence,
new_suspects: &SuspectedResources,
temp_resources: impl Iterator<Item = TempResource<A>>,
) {
@ -248,7 +244,6 @@ impl<A: hal::Api> LifetimeTracker<A> {
self.active.alloc().init(ActiveSubmission {
index,
fence,
last_resources,
mapped: Vec::new(),
});
@ -258,65 +253,23 @@ impl<A: hal::Api> LifetimeTracker<A> {
self.mapped.push(Stored { value, ref_count });
}
/// Block until every submission still tracked in `self.active` has
/// signaled its fence, or time out after `CLEANUP_WAIT_MS`.
///
/// Returns `WaitIdleError::StuckGpu` if any fence did not signal in time,
/// or a `DeviceError` if the wait call itself failed.
fn wait_idle(&self, device: &A::Device) -> Result<(), WaitIdleError> {
if !self.active.is_empty() {
log::debug!("Waiting for IDLE...");
// Stays `true` only if *every* fence wait reported completion.
let mut status = true;
//TODO: change this to wait for the last fence value only
for a in self.active.iter() {
status &= unsafe {
device
.wait(
&a.fence,
a.index as u64, //TODO: check this
CLEANUP_WAIT_MS,
)
.map_err(DeviceError::from)?
};
}
log::debug!("...Done");
if !status {
// We timed out while waiting for the fences
return Err(WaitIdleError::StuckGpu);
}
}
Ok(())
}
/// Returns the last submission index that is done.
pub fn triage_submissions(
&mut self,
device: &A::Device,
force_wait: bool,
) -> Result<SubmissionIndex, WaitIdleError> {
pub fn triage_submissions(&mut self, last_done: SubmissionIndex) {
profiling::scope!("triage_submissions");
/* TODO: better sync
if force_wait {
self.wait_idle(device)?;
}
//TODO: enable when `is_sorted_by_key` is stable
//debug_assert!(self.active.is_sorted_by_key(|a| a.index));
let done_count = self
.active
.iter()
.position(|a| unsafe { device.get_fence_value(&a.fence).unwrap() })
.position(|a| a.index > last_done)
.unwrap_or_else(|| self.active.len());
let last_done = match done_count.checked_sub(1) {
Some(i) => self.active[i].index,
None => return Ok(0),
};
for a in self.active.drain(..done_count) {
log::trace!("Active submission {} is done", a.index);
self.free_resources.extend(a.last_resources);
self.ready_to_map.extend(a.mapped);
unsafe {
device.destroy_fence(a.fence);
}
}*/
let last_done = 0;
Ok(last_done)
}
}
pub fn cleanup(&mut self, device: &A::Device) {

View File

@ -11,7 +11,8 @@ use crate::{
pipeline, resource, swap_chain,
track::{BufferState, TextureSelector, TextureState, TrackerSet, UsageConflict},
validation::{self, check_buffer_usage, check_texture_usage},
FastHashMap, Label, LabelHelpers, LifeGuard, MultiRefCount, Stored, SubmissionIndex,
CowHelpers as _, FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, Stored,
SubmissionIndex,
};
use arrayvec::ArrayVec;
@ -31,6 +32,7 @@ pub mod trace;
use smallvec::SmallVec;
pub const SHADER_STAGE_COUNT: usize = 3;
const CLEANUP_WAIT_MS: u32 = 5000;
const IMPLICIT_FAILURE: &str = "failed implicit";
@ -54,13 +56,6 @@ pub(crate) struct AttachmentData<T> {
}
impl<T: PartialEq> Eq for AttachmentData<T> {}
impl<T> AttachmentData<T> {
pub(crate) fn all(&self) -> impl Iterator<Item = &T> {
self.colors
.iter()
.chain(&self.resolves)
.chain(&self.depth_stencil)
}
pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> {
AttachmentData {
colors: self.colors.iter().map(&fun).collect(),
@ -128,18 +123,19 @@ fn map_buffer<A: hal::Api>(
size: BufferAddress,
kind: HostMap,
) -> Result<ptr::NonNull<u8>, resource::BufferAccessError> {
let ptr = raw
.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size)
.map_err(DeviceError::from)?;
let ptr = unsafe {
raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size)
.map_err(DeviceError::from)?
};
buffer.sync_mapped_writes = match kind {
HostMap::Read if !buffer.is_coherent => {
HostMap::Read if !buffer.is_coherent => unsafe {
raw.invalidate_mapped_ranges(
buffer.raw.as_ref().unwrap(),
iter::once(offset..offset + size),
);
None
}
},
HostMap::Write if !buffer.is_coherent => Some(offset..offset + size),
_ => None,
};
@ -163,10 +159,12 @@ fn map_buffer<A: hal::Api>(
)
};
if zero_init_needs_flush_now {
raw.flush_mapped_ranges(
buffer.raw.as_ref().unwrap(),
iter::once(uninitialized_range.start..uninitialized_range.start + num_bytes),
);
unsafe {
raw.flush_mapped_ranges(
buffer.raw.as_ref().unwrap(),
iter::once(uninitialized_range.start..uninitialized_range.start + num_bytes),
)
};
}
}
@ -201,6 +199,7 @@ pub struct Device<A: hal::Api> {
//Note: The submission index here corresponds to the last submission that is done.
pub(crate) life_guard: LifeGuard,
pub(crate) active_submission_index: SubmissionIndex,
fence: A::Fence,
/// Has to be locked temporarily only (locked last)
pub(crate) trackers: Mutex<TrackerSet>,
// Life tracker should be locked right after the device and before anything else.
@ -226,7 +225,7 @@ pub enum CreateDeviceError {
impl<A: HalApi> Device<A> {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
device: hal::OpenDevice<A>,
open: hal::OpenDevice<A>,
adapter_id: Stored<id::AdapterId>,
alignments: hal::Alignments,
downlevel: wgt::DownlevelCapabilities,
@ -237,13 +236,16 @@ impl<A: HalApi> Device<A> {
if let Some(_) = trace_path {
log::error!("Feature 'trace' is not enabled");
}
let fence =
unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?;
Ok(Self {
raw: device.device,
raw: open.device,
adapter_id,
queue: device.queue,
queue: open.queue,
life_guard: LifeGuard::new("<device>"),
active_submission_index: 0,
fence,
trackers: Mutex::new(TrackerSet::new(A::VARIANT)),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: life::SuspectedResources::default(),
@ -277,10 +279,6 @@ impl<A: HalApi> Device<A> {
}
}
pub(crate) fn last_completed_submission_index(&self) -> SubmissionIndex {
self.life_guard.submission_index.load(Ordering::Acquire)
}
fn lock_life_internal<'this, 'token: 'this>(
tracker: &'this Mutex<life::LifetimeTracker<A>>,
_token: &mut Token<'token, Self>,
@ -313,13 +311,27 @@ impl<A: HalApi> Device<A> {
token,
);
life_tracker.triage_mapped(hub, token);
let last_done = life_tracker.triage_submissions(&self.raw, force_wait)?;
let last_done_index = if force_wait {
let current_index = self.active_submission_index;
unsafe {
self.raw
.wait(&self.fence, current_index, CLEANUP_WAIT_MS)
.map_err(DeviceError::from)?
};
current_index
} else {
unsafe {
self.raw
.get_fence_value(&self.fence)
.map_err(DeviceError::from)?
}
};
life_tracker.triage_submissions(last_done_index);
let callbacks = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
life_tracker.cleanup(&self.raw);
self.life_guard
.submission_index
.store(last_done, Ordering::Release);
Ok(callbacks)
}
@ -416,11 +428,14 @@ impl<A: HalApi> Device<A> {
usage |= hal::BufferUse::COPY_DST;
}
let mut memory_flags = hal::MemoryFlag::empty();
memory_flags.set(hal::MemoryFlag::TRANSIENT, transient);
let hal_desc = hal::BufferDescriptor {
label: desc.label.map(|cow| cow.as_ref()),
label: desc.label.borrow_option(),
size: desc.size,
usage,
memory_flags: hal::MemoryFlag::empty(),
memory_flags,
};
let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?;
@ -508,7 +523,7 @@ impl<A: HalApi> Device<A> {
usage: conv::map_texture_usage(desc.usage, desc.format.into()),
memory_flags: hal::MemoryFlag::empty(),
};
let mut raw = unsafe {
let raw = unsafe {
self.raw
.create_texture(&hal_desc)
.map_err(DeviceError::from)?
@ -900,8 +915,7 @@ impl<A: HalApi> Device<A> {
} => (Some(wgt::Features::BUFFER_BINDING_ARRAY), false),
Bt::Buffer {
ty: wgt::BufferBindingType::Storage { read_only },
has_dynamic_offset,
min_binding_size: _,
..
} => (Some(wgt::Features::BUFFER_BINDING_ARRAY), !read_only),
Bt::Sampler { .. } => (None, false),
Bt::Texture { .. } => (Some(wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY), false),
@ -920,7 +934,7 @@ impl<A: HalApi> Device<A> {
};
// Validate the count parameter
if let Some(count) = entry.count {
if entry.count.is_some() {
required_features |= array_feature
.ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
@ -1515,7 +1529,7 @@ impl<A: HalApi> Device<A> {
.iter()
.map(|&id| &bgl_guard.get(id).unwrap().raw)
.collect(),
push_constant_ranges: desc.push_constant_ranges,
push_constant_ranges: desc.push_constant_ranges.reborrow(),
};
let raw = unsafe {
@ -1631,28 +1645,30 @@ impl<A: HalApi> Device<A> {
.get(desc.stage.module)
.map_err(|_| validation::StageError::InvalidModule)?;
let flag = wgt::ShaderStage::COMPUTE;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
&*bgl_guard,
)),
None => {
for _ in 0..self.limits.max_bind_groups {
derived_group_layouts.push(binding_model::BindEntryMap::default());
{
let flag = wgt::ShaderStage::COMPUTE;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
&*bgl_guard,
)),
None => {
for _ in 0..self.limits.max_bind_groups {
derived_group_layouts.push(binding_model::BindEntryMap::default());
}
None
}
None
}
};
let _ = shader_module.interface.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&desc.stage.entry_point,
flag,
io,
)?;
};
let _ = shader_module.interface.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&desc.stage.entry_point,
flag,
io,
)?;
}
let pipeline_layout_id = match desc.layout {
Some(id) => id,
@ -1672,7 +1688,7 @@ impl<A: HalApi> Device<A> {
label: desc.label.borrow_option(),
layout: &layout.raw,
stage: hal::ProgrammableStage {
entry_point: desc.stage.entry_point,
entry_point: desc.stage.entry_point.reborrow(),
module: &shader_module.raw,
},
};
@ -1944,7 +1960,7 @@ impl<A: HalApi> Device<A> {
hal::ProgrammableStage {
module: &shader_module.raw,
entry_point: stage.entry_point,
entry_point: stage.entry_point.reborrow(),
}
};
@ -1989,7 +2005,7 @@ impl<A: HalApi> Device<A> {
Some(hal::ProgrammableStage {
module: &shader_module.raw,
entry_point: fragment.stage.entry_point,
entry_point: fragment.stage.entry_point.reborrow(),
})
}
None => None,
@ -2050,7 +2066,7 @@ impl<A: HalApi> Device<A> {
vertex_buffers: vertex_buffers.into(),
vertex_stage,
primitive: desc.primitive,
depth_stencil: desc.depth_stencil,
depth_stencil: desc.depth_stencil.clone(),
multisample: desc.multisample,
fragment_stage,
color_targets: Cow::Borrowed(color_targets),
@ -2117,14 +2133,21 @@ impl<A: HalApi> Device<A> {
submission_index: SubmissionIndex,
token: &mut Token<Self>,
) -> Result<(), WaitIdleError> {
if self.last_completed_submission_index() <= submission_index {
let last_done_index = unsafe {
self.raw
.get_fence_value(&self.fence)
.map_err(DeviceError::from)?
};
if last_done_index < submission_index {
log::info!("Waiting for submission {:?}", submission_index);
self.lock_life(token)
.triage_submissions(&self.raw, true)
.map(|_| ())
} else {
Ok(())
unsafe {
self.raw
.wait(&self.fence, submission_index, !0)
.map_err(DeviceError::from)?
};
self.lock_life(token).triage_submissions(submission_index);
}
Ok(())
}
fn create_query_set(
@ -2186,13 +2209,18 @@ impl<A: hal::Api> Device<A> {
/// Wait for idle and remove resources that we can, before we die.
pub(crate) fn prepare_to_die(&mut self) {
let mut life_tracker = self.life_tracker.lock();
if let Err(error) = life_tracker.triage_submissions(&self.raw, true) {
log::error!("failed to triage submissions: {}", error);
let current_index = self.active_submission_index;
if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } {
log::error!("failed to wait for the device: {:?}", error);
}
life_tracker.triage_submissions(current_index);
life_tracker.cleanup(&self.raw);
}
pub(crate) fn dispose(self) {
unsafe {
self.raw.destroy_fence(self.fence);
}
self.pending_writes.dispose(&self.raw);
}
}
@ -2391,7 +2419,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
};
let stage_buffer = stage.raw.unwrap();
let ptr = match device.raw.map_buffer(&stage_buffer, 0..stage.size) {
let ptr = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } {
Ok(ptr) => ptr,
Err(e) => {
let raw = buffer.raw.unwrap();
@ -2496,19 +2524,21 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let raw_buf = buffer.raw.as_ref().unwrap();
let ptr = device
.raw
.map_buffer(raw_buf, 0..data.len() as u64)
.map_err(DeviceError::from)?;
ptr::copy_nonoverlapping(
data.as_ptr(),
ptr.as_ptr().offset(offset as isize),
data.len(),
);
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
unsafe {
let ptr = device
.raw
.map_buffer(raw_buf, 0..data.len() as u64)
.map_err(DeviceError::from)?;
ptr::copy_nonoverlapping(
data.as_ptr(),
ptr.as_ptr().offset(offset as isize),
data.len(),
);
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
}
//TODO: flush
Ok(())
@ -2540,19 +2570,21 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//TODO: invalidate
let raw_buf = buffer.raw.as_ref().unwrap();
let ptr = device
.raw
.map_buffer(raw_buf, 0..data.len() as u64)
.map_err(DeviceError::from)?;
ptr::copy_nonoverlapping(
ptr.as_ptr().offset(offset as isize),
data.as_mut_ptr(),
data.len(),
);
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
unsafe {
let ptr = device
.raw
.map_buffer(raw_buf, 0..data.len() as u64)
.map_err(DeviceError::from)?;
ptr::copy_nonoverlapping(
ptr.as_ptr().offset(offset as isize),
data.as_mut_ptr(),
data.len(),
);
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
}
Ok(())
}
@ -3375,7 +3407,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
label: desc.label.borrow_option(),
})
};
let mut raw = match cmd_buf_result {
let raw = match cmd_buf_result {
Ok(raw) => raw,
Err(error) => break DeviceError::from(error),
};
@ -3901,7 +3933,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !caps.formats.contains(&config.format) {
return Err(swap_chain::CreateSwapChainError::UnsupportedFormat {
requested: config.format,
available: caps.formats,
available: caps.formats.clone(),
});
}
if !caps.usage.contains(config.usage) {
@ -3940,7 +3972,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Err(_) => break swap_chain::CreateSwapChainError::InvalidSurface,
};
let caps = {
let caps = unsafe {
let surface = A::get_surface_mut(surface);
let adapter = &adapter_guard[device.adapter_id.value];
match adapter.raw.adapter.surface_capabilities(surface) {
@ -4303,6 +4335,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
let _ = ptr;
if needs_flush {
unsafe {
device
.raw
.flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size));
}
}
let raw_buf = buffer
.raw
@ -4361,10 +4400,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
let _ = (ptr, range);
}
device
.raw
.unmap_buffer(buffer.raw.as_ref().unwrap())
.map_err(DeviceError::from)?;
unsafe {
device
.raw
.unmap_buffer(buffer.raw.as_ref().unwrap())
.map_err(DeviceError::from)?
};
}
}
Ok(None)

View File

@ -168,7 +168,7 @@ impl<A: hal::Api> super::Device<A> {
usage: hal::BufferUse::MAP_WRITE | hal::BufferUse::COPY_SRC,
memory_flags: hal::MemoryFlag::TRANSIENT,
};
let mut buffer = unsafe { self.raw.create_buffer(&stage_desc)? };
let buffer = unsafe { self.raw.create_buffer(&stage_desc)? };
let cmdbuf = match self.pending_writes.command_buffer.take() {
Some(cmdbuf) => cmdbuf,
@ -501,14 +501,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
}
device
.raw
.unmap_buffer(&stage.buffer)
.map_err(DeviceError::from)?;
if !stage.is_coherent {
unsafe {
device
.raw
.flush_mapped_ranges(&stage.buffer, iter::once(0..stage_size));
.unmap_buffer(&stage.buffer)
.map_err(DeviceError::from)?;
if !stage.is_coherent {
device
.raw
.flush_mapped_ranges(&stage.buffer, iter::once(0..stage_size));
}
}
// WebGPU uses the physical size of the texture for copies whereas vulkan uses
@ -572,7 +574,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device.active_submission_index += 1;
let submit_index = device.active_submission_index;
let fence = {
{
let mut signal_swapchain_semaphores = SmallVec::<[_; 1]>::new();
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
@ -701,12 +703,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
cmdbuf.raw.last_mut().unwrap().finish();
}
// execute resource transitions
let mut transit = device
.raw
.create_command_buffer(&hal::CommandBufferDescriptor {
label: Some("_Transit"),
})
.map_err(DeviceError::from)?;
let mut transit = unsafe {
device
.raw
.create_command_buffer(&hal::CommandBufferDescriptor {
label: Some("_Transit"),
})
.map_err(DeviceError::from)?
};
log::trace!("Stitching command buffer {:?} before submission", cmb_id);
trackers.merge_extend_stateless(&cmdbuf.trackers);
CommandBuffer::insert_barriers(
@ -731,13 +735,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
// now prepare the GPU submission
let mut fence = device.raw.create_fence().map_err(DeviceError::from)?;
//Note: we could technically avoid the heap Vec here
let mut command_buffers = Vec::new();
command_buffers.extend(pending_write_command_buffer);
for &cmd_buf_id in command_buffer_ids.iter() {
match command_buffer_guard.get(cmd_buf_id) {
match command_buffer_guard.get_mut(cmd_buf_id) {
Ok(cmd_buf) if cmd_buf.is_finished() => {
command_buffers.extend(cmd_buf.raw.drain(..));
}
@ -745,14 +747,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
let fence_value = 1; //TODO
unsafe {
device
.queue
.submit(command_buffers.into_iter(), Some((&mut fence, fence_value)));
device.queue.submit(
command_buffers.into_iter(),
Some((&mut device.fence, submit_index)),
);
}
fence
};
}
let callbacks = match device.maintain(&hub, false, &mut token) {
Ok(callbacks) => callbacks,
@ -763,7 +764,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
profiling::scope!("cleanup");
super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
submit_index,
fence,
&device.temp_suspected,
device.pending_writes.temp_resources.drain(..),
);

View File

@ -627,14 +627,18 @@ impl<A: HalApi, F: GlobalIdentityHandlerFactory> Hub<A, F> {
if let Element::Occupied(command_buffer, _) = element {
let device = &devices[command_buffer.device_id.value];
for raw in command_buffer.raw {
device.raw.destroy_command_buffer(raw);
unsafe {
device.raw.destroy_command_buffer(raw);
}
}
}
}
for element in self.bind_groups.data.write().map.drain(..) {
if let Element::Occupied(bind_group, _) = element {
let device = &devices[bind_group.device_id.value];
device.raw.destroy_bind_group(bind_group.raw);
unsafe {
device.raw.destroy_bind_group(bind_group.raw);
}
}
}

View File

@ -140,11 +140,12 @@ impl<A: HalApi> Adapter<A> {
wgt::TextureFormat::Rgba8Unorm,
];
let caps = self
.raw
.adapter
.surface_capabilities(A::get_surface_mut(surface))
.ok_or(GetSwapChainPreferredFormatError::UnsupportedQueueFamily)?;
let caps = unsafe {
self.raw
.adapter
.surface_capabilities(A::get_surface_mut(surface))
.ok_or(GetSwapChainPreferredFormatError::UnsupportedQueueFamily)?
};
preferred_formats
.iter()
@ -157,7 +158,7 @@ impl<A: HalApi> Adapter<A> {
&self,
format: wgt::TextureFormat,
) -> wgt::TextureFormatFeatures {
let caps = self.raw.adapter.texture_format_capabilities(format);
let caps = unsafe { self.raw.adapter.texture_format_capabilities(format) };
let mut allowed_usages = format.describe().guaranteed_format_features.allowed_usages;
allowed_usages.set(
@ -351,6 +352,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
Surface {
/*
#[cfg(vulkan)]
vulkan: map(&self.instance.vulkan),
#[cfg(metal)]
@ -361,6 +363,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
dx11: map(&self.instance.dx11),
#[cfg(gl)]
gl: map(&self.instance.gl),
*/
}
}
};

View File

@ -66,11 +66,11 @@ use loom::sync::atomic;
#[cfg(not(test))]
use std::sync::atomic;
use atomic::{AtomicUsize, Ordering};
use atomic::{AtomicU64, AtomicUsize, Ordering};
use std::{borrow::Cow, os::raw::c_char, ptr};
type SubmissionIndex = usize;
type SubmissionIndex = hal::FenceValue;
type Index = u32;
type Epoch = u32;
@ -90,6 +90,15 @@ impl<'a> LabelHelpers<'a> for Label<'a> {
}
}
/// Helper for reborrowing a [`Cow`] without cloning any owned data.
trait CowHelpers<'a> {
    /// Returns a `Cow` that borrows from `self`, regardless of whether
    /// `self` is currently the `Borrowed` or the `Owned` variant.
    fn reborrow(&'a self) -> Self;
}

impl<'a, T: ToOwned + ?Sized> CowHelpers<'a> for Cow<'a, T> {
    fn reborrow(&'a self) -> Self {
        // Both variants deref to `&T`; wrap that borrow in a fresh `Cow`.
        Cow::Borrowed(&**self)
    }
}
/// Reference count object that is 1:1 with each reference.
#[derive(Debug)]
struct RefCount(ptr::NonNull<AtomicUsize>);
@ -197,7 +206,7 @@ impl Drop for MultiRefCount {
#[derive(Debug)]
pub struct LifeGuard {
ref_count: Option<RefCount>,
submission_index: AtomicUsize,
submission_index: AtomicU64,
#[cfg(debug_assertions)]
pub(crate) label: String,
}
@ -208,7 +217,7 @@ impl LifeGuard {
let bx = Box::new(AtomicUsize::new(1));
Self {
ref_count: ptr::NonNull::new(Box::into_raw(bx)).map(RefCount),
submission_index: AtomicUsize::new(0),
submission_index: AtomicU64::new(0),
#[cfg(debug_assertions)]
label: label.to_string(),
}

View File

@ -162,6 +162,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(DeviceError::from(err).into());
}
hal::SurfaceError::Outdated => Status::Outdated,
hal::SurfaceError::Other(msg) => {
log::error!("acquire error: {}", msg);
Status::Lost
}
},
),
};
@ -175,10 +179,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let view_id = match texture {
Some(suf_texture) => {
let raw = device
.raw
.create_texture_view(suf_texture.borrow(), &hal_desc)
.map_err(DeviceError::from)?;
let raw = unsafe {
device
.raw
.create_texture_view(suf_texture.borrow(), &hal_desc)
.map_err(DeviceError::from)?
};
let view = resource::TextureView {
raw,
source: resource::TextureViewSource::SwapChain(Stored {
@ -284,6 +290,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::SurfaceError::Lost => Ok(Status::Lost),
hal::SurfaceError::Device(err) => Err(SwapChainError::from(DeviceError::from(err))),
hal::SurfaceError::Outdated => Ok(Status::Outdated),
hal::SurfaceError::Other(msg) => {
log::error!("acquire error: {}", msg);
Err(SwapChainError::InvalidSurface)
}
},
}
}

View File

@ -599,7 +599,7 @@ impl TrackerSet {
}
/// Clear all the trackers.
pub fn clear(&mut self) {
pub fn _clear(&mut self) {
self.buffers.clear();
self.textures.clear();
self.views.clear();

View File

@ -15,6 +15,7 @@ license = "MPL-2.0"
[dependencies]
bitflags = "1.0"
raw-window-handle = "0.3"
smallvec = "1"
thiserror = "1"
wgt = { package = "wgpu-types", path = "../wgpu-types" }

View File

@ -37,6 +37,12 @@ impl crate::Api for Api {
}
impl crate::Instance<Api> for Context {
unsafe fn create_surface(
&self,
rwh: &impl raw_window_handle::HasRawWindowHandle,
) -> Result<Context, crate::UnsupportedWindow> {
Ok(Context)
}
unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<Api>> {
Vec::new()
}
@ -266,10 +272,10 @@ impl crate::CommandBuffer<Api> for Encoder {
) {
}
// render
unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<Api>) {}
unsafe fn end_render_pass(&mut self) {}
unsafe fn begin_compute_pass(&mut self) {}
unsafe fn end_compute_pass(&mut self) {}
unsafe fn set_bind_group(
&mut self,
@ -357,6 +363,11 @@ impl crate::CommandBuffer<Api> for Encoder {
) {
}
// compute
unsafe fn begin_compute_pass(&mut self) {}
unsafe fn end_compute_pass(&mut self) {}
unsafe fn set_compute_pipeline(&mut self, pipeline: &Resource) {}
unsafe fn dispatch(&mut self, count: [u32; 3]) {}

View File

@ -96,6 +96,10 @@ pub enum SurfaceError {
Other(&'static str),
}
/// Error returned by [`Instance::create_surface`] when the provided raw
/// window handle is not supported by the backend.
#[derive(Clone, Debug, PartialEq, Error)]
#[error("Window handle is not supported")]
pub struct UnsupportedWindow;
pub trait Api: Clone + Sized {
type Instance: Instance<Self>;
type Surface: Surface<Self>;
@ -121,6 +125,10 @@ pub trait Api: Clone + Sized {
}
pub trait Instance<A: Api> {
unsafe fn create_surface(
&self,
rwh: &impl raw_window_handle::HasRawWindowHandle,
) -> Result<A::Surface, UnsupportedWindow>;
unsafe fn enumerate_adapters(&self) -> Vec<ExposedAdapter<A>>;
}