[wgpu-core] when mapping buffers for reading, mark buffers as initialized only when they have MAP_WRITE usage (#6178)

Author: Teodor Tanasoaia, 2024-09-04 16:57:08 +02:00 (committed via GitHub)
parent ee35b0e586
commit 4e78829d82
2 changed files with 74 additions and 11 deletions
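
Why the usage check matters: a buffer created with MAP_READ but without MAP_WRITE can only ever be mapped read-only. If its memory is non-coherent, wgpu-core still zero-fills the uninitialized bytes of the host mapping so the caller never observes garbage, but it may not call flush_mapped_ranges on a read-only mapping, so those zeros never reach the device copy and the range must stay marked uninitialized. A minimal sketch of the affected case via the public wgpu API (illustrative only; the `device` handle is assumed):

    // A readback buffer with MAP_READ but without MAP_WRITE usage.
    let buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("readback"),
        size: 256,
        usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });

    // Mapping for reading zero-fills uninitialized bytes host-side only;
    // after this commit the tracker keeps those ranges marked uninitialized.
    buffer.slice(..).map_async(wgpu::MapMode::Read, |result| {
        result.expect("failed to map buffer");
    });

Buffers with both MAP_READ and MAP_WRITE (possible only with the native MAPPABLE_PRIMARY_BUFFERS feature) keep the old drain-and-flush path, as the diff below shows.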

wgpu-core/src/device/resource.rs

@@ -336,19 +336,41 @@ fn map_buffer(
     let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) };

-    for uninitialized in buffer
-        .initialization_status
-        .write()
-        .drain(offset..(size + offset))
+    // We can't call flush_mapped_ranges in this case, so we can't drain the uninitialized ranges either
+    if !mapping.is_coherent
+        && kind == HostMap::Read
+        && !buffer.usage.contains(wgt::BufferUsages::MAP_WRITE)
     {
-        // The mapping's pointer is already offset, however we track the
-        // uninitialized range relative to the buffer's start.
-        let fill_range =
-            (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize;
-        mapped[fill_range].fill(0);
+        for uninitialized in buffer
+            .initialization_status
+            .write()
+            .uninitialized(offset..(size + offset))
+        {
+            // The mapping's pointer is already offset, however we track the
+            // uninitialized range relative to the buffer's start.
+            let fill_range =
+                (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize;
+            mapped[fill_range].fill(0);
+        }
+    } else {
+        for uninitialized in buffer
+            .initialization_status
+            .write()
+            .drain(offset..(size + offset))
+        {
+            // The mapping's pointer is already offset, however we track the
+            // uninitialized range relative to the buffer's start.
+            let fill_range =
+                (uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize;
+            mapped[fill_range].fill(0);

-        if !mapping.is_coherent && kind == HostMap::Read {
-            unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) };
+            // NOTE: This is only possible when MAPPABLE_PRIMARY_BUFFERS is enabled.
+            if !mapping.is_coherent
+                && kind == HostMap::Read
+                && buffer.usage.contains(wgt::BufferUsages::MAP_WRITE)
+            {
+                unsafe { raw.flush_mapped_ranges(raw_buffer, &[uninitialized]) };
+            }
         }
     }
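
Summarizing the branch above: the tracker's ranges may be drained (marked initialized) only when the zero-fill can actually reach the buffer's contents. A condensed sketch of the predicate, derived from the diff (can_mark_initialized is not a real wgpu-core helper):

    // Not a real wgpu-core helper: a condensed model of the branch condition.
    // Draining (marking initialized) is sound only if the zeros written into
    // the mapping can reach the buffer: coherent memory, a write mapping, or
    // a buffer that can later be mapped for writing.
    fn can_mark_initialized(
        is_coherent: bool,
        kind: HostMap,
        usage: wgt::BufferUsages,
    ) -> bool {
        is_coherent
            || kind != HostMap::Read
            || usage.contains(wgt::BufferUsages::MAP_WRITE)
    }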

wgpu-core/src/init_tracker/mod.rs

@@ -65,6 +65,35 @@ pub(crate) struct InitTracker<Idx: Ord + Copy + Default> {
     uninitialized_ranges: UninitializedRangeVec<Idx>,
 }

+pub(crate) struct UninitializedIter<'a, Idx: fmt::Debug + Ord + Copy> {
+    uninitialized_ranges: &'a UninitializedRangeVec<Idx>,
+    drain_range: Range<Idx>,
+    next_index: usize,
+}
+
+impl<'a, Idx> Iterator for UninitializedIter<'a, Idx>
+where
+    Idx: fmt::Debug + Ord + Copy,
+{
+    type Item = Range<Idx>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.uninitialized_ranges
+            .get(self.next_index)
+            .and_then(|range| {
+                if range.start < self.drain_range.end {
+                    self.next_index += 1;
+                    Some(
+                        range.start.max(self.drain_range.start)
+                            ..range.end.min(self.drain_range.end),
+                    )
+                } else {
+                    None
+                }
+            })
+    }
+}
+
 pub(crate) struct InitTrackerDrain<'a, Idx: fmt::Debug + Ord + Copy> {
     uninitialized_ranges: &'a mut UninitializedRangeVec<Idx>,
     drain_range: Range<Idx>,
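
The iterator added above walks the sorted, non-overlapping range list starting from a precomputed index and clamps each overlapping range to the query bounds, stopping at the first range that starts at or beyond the query's end. The same clamping logic as a standalone model (plain-std sketch, not wgpu-core code):

    use std::ops::Range;

    // Yield the overlap of each sorted, disjoint `ranges` entry with `query`.
    fn clamped_overlaps(ranges: &[Range<u32>], query: Range<u32>) -> Vec<Range<u32>> {
        ranges
            .iter()
            .skip_while(|r| r.end <= query.start) // same role as partition_point
            .take_while(|r| r.start < query.end)  // same role as the iterator's stop test
            .map(|r| r.start.max(query.start)..r.end.min(query.end))
            .collect()
    }

    assert_eq!(
        clamped_overlaps(&[0..4, 8..16, 20..30], 2..22),
        vec![2..4, 8..16, 20..22]
    );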
@@ -190,6 +219,18 @@ where
         })
     }

+    // Returns an iterator over the uninitialized ranges in a query range.
+    pub(crate) fn uninitialized(&mut self, drain_range: Range<Idx>) -> UninitializedIter<Idx> {
+        let index = self
+            .uninitialized_ranges
+            .partition_point(|r| r.end <= drain_range.start);
+        UninitializedIter {
+            drain_range,
+            uninitialized_ranges: &self.uninitialized_ranges,
+            next_index: index,
+        }
+    }
+
     // Drains uninitialized ranges in a query range.
     pub(crate) fn drain(&mut self, drain_range: Range<Idx>) -> InitTrackerDrain<Idx> {
         let index = self
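
Taken together, `uninitialized` is a non-destructive counterpart to `drain`: both use partition_point to find the first candidate range and clamp results to the query, but only `drain` removes what it yields. A crate-internal usage sketch (these items are pub(crate), so this only compiles inside wgpu-core; assumes `InitTracker::new(size)`, which starts with 0..size uninitialized):

    let mut tracker = InitTracker::<u32>::new(64);

    // `uninitialized` clamps each stored range to the query range and leaves
    // the tracker's state untouched.
    let pending: Vec<_> = tracker.uninitialized(16..32).collect();
    assert_eq!(pending, vec![16..32]);
    assert_eq!(tracker.uninitialized(16..32).count(), 1);

    // `drain` yields the same clamped ranges but also marks them initialized.
    let drained: Vec<_> = tracker.drain(16..32).collect();
    assert_eq!(drained, vec![16..32]);
    assert_eq!(tracker.uninitialized(16..32).count(), 0);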