Add Suballocator::suballocations (#2499)

* Add `Suballocator::suballocations`

* Add missing `Send` and `Sync` impls for `free_list::Suballocations`

* Missed docs

* Strange import, what is rust-analyzer smoking
marc0246 2024-03-16 10:52:51 +01:00, committed by GitHub
parent f911996534
commit 984cbeb0c3
4 changed files with 409 additions and 149 deletions

vulkano/src/memory/allocator/suballocator/buddy.rs

@ -1,4 +1,6 @@
use super::{AllocationType, Region, Suballocation, Suballocator, SuballocatorError};
use super::{
AllocationType, Region, Suballocation, SuballocationNode, Suballocator, SuballocatorError,
};
use crate::{
memory::{
allocator::{align_up, array_vec::ArrayVec, AllocationHandle, DeviceLayout},
@ -6,10 +8,7 @@ use crate::{
},
DeviceSize, NonZeroDeviceSize,
};
use std::{
cell::{Cell, UnsafeCell},
cmp,
};
use std::cmp;
/// A [suballocator] whose structure forms a binary tree of power-of-two-sized suballocations.
///
@ -62,8 +61,11 @@ use std::{
pub struct BuddyAllocator {
region_offset: DeviceSize,
// Total memory remaining in the region.
free_size: Cell<DeviceSize>,
state: UnsafeCell<BuddyAllocatorState>,
free_size: DeviceSize,
// Every order has its own free-list for convenience, so that we don't have to traverse a tree.
// Each free-list is sorted by offset because we want to find the first fit, as this strategy
// minimizes external fragmentation.
free_list: ArrayVec<Vec<DeviceSize>, { Self::MAX_ORDERS }>,
}
impl BuddyAllocator {
@ -75,6 +77,8 @@ impl BuddyAllocator {
}
unsafe impl Suballocator for BuddyAllocator {
type Suballocations<'a> = std::iter::Empty<SuballocationNode>;
/// Creates a new `BuddyAllocator` for the given [region].
///
/// # Panics
@ -93,24 +97,21 @@ unsafe impl Suballocator for BuddyAllocator {
assert!(max_order < BuddyAllocator::MAX_ORDERS);
let free_size = Cell::new(region.size());
let mut free_list =
ArrayVec::new(max_order + 1, [EMPTY_FREE_LIST; BuddyAllocator::MAX_ORDERS]);
// The root node has the lowest offset and highest order, so it's the whole region.
free_list[max_order].push(region.offset());
let state = UnsafeCell::new(BuddyAllocatorState { free_list });
BuddyAllocator {
region_offset: region.offset(),
free_size,
state,
free_size: region.size(),
free_list,
}
}
#[inline]
fn allocate(
&self,
&mut self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
@ -150,17 +151,16 @@ unsafe impl Suballocator for BuddyAllocator {
let size = cmp::max(size, BuddyAllocator::MIN_NODE_SIZE).next_power_of_two();
let min_order = (size / BuddyAllocator::MIN_NODE_SIZE).trailing_zeros() as usize;
let state = unsafe { &mut *self.state.get() };
// Start searching at the lowest possible order going up.
for (order, free_list) in state.free_list.iter_mut().enumerate().skip(min_order) {
for (order, free_list) in self.free_list.iter_mut().enumerate().skip(min_order) {
for (index, &offset) in free_list.iter().enumerate() {
if is_aligned(offset, alignment) {
free_list.remove(index);
// Go in the opposite direction, splitting nodes from higher orders. The lowest
// order doesn't need any splitting.
for (order, free_list) in state
for (order, free_list) in self
.free_list
.iter_mut()
.enumerate()
@ -185,7 +185,7 @@ unsafe impl Suballocator for BuddyAllocator {
// This can't overflow because suballocation sizes in the free-list are
// constrained by the remaining size of the region.
self.free_size.set(self.free_size.get() - size);
self.free_size -= size;
return Ok(Suballocation {
offset,
@ -206,17 +206,16 @@ unsafe impl Suballocator for BuddyAllocator {
}
#[inline]
unsafe fn deallocate(&self, suballocation: Suballocation) {
unsafe fn deallocate(&mut self, suballocation: Suballocation) {
let mut offset = suballocation.offset;
let order = suballocation.handle.as_index();
let min_order = order;
let state = unsafe { &mut *self.state.get() };
debug_assert!(!state.free_list[order].contains(&offset));
debug_assert!(!self.free_list[order].contains(&offset));
// Try to coalesce nodes while incrementing the order.
for (order, free_list) in state.free_list.iter_mut().enumerate().skip(min_order) {
for (order, free_list) in self.free_list.iter_mut().enumerate().skip(min_order) {
// This can't discard any bits because `order` is confined to the range
// [0, log(region.size / BuddyAllocator::MIN_NODE_SIZE)].
let size = BuddyAllocator::MIN_NODE_SIZE << order;
@ -241,7 +240,7 @@ unsafe impl Suballocator for BuddyAllocator {
// The sizes of suballocations allocated by `self` are constrained by that of
// its region, so they can't possibly overflow when added up.
self.free_size.set(self.free_size.get() + size);
self.free_size += size;
break;
}
@ -256,17 +255,14 @@ unsafe impl Suballocator for BuddyAllocator {
/// [internal fragmentation]: super#internal-fragmentation
#[inline]
fn free_size(&self) -> DeviceSize {
self.free_size.get()
self.free_size
}
#[inline]
fn cleanup(&mut self) {}
}
#[derive(Debug)]
struct BuddyAllocatorState {
// Every order has its own free-list for convenience, so that we don't have to traverse a tree.
// Each free-list is sorted by offset because we want to find the first-fit as this strategy
// minimizes external fragmentation.
free_list: ArrayVec<Vec<DeviceSize>, { BuddyAllocator::MAX_ORDERS }>,
#[inline]
fn suballocations(&self) -> Self::Suballocations<'_> {
todo!()
}
}
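
Note that `BuddyAllocator::suballocations` is left as `todo!()` in this commit. To make the per-order free-list above concrete: order 0 holds `MIN_NODE_SIZE`-sized nodes and each higher order doubles the node size. Below is a minimal standalone sketch of the `next_power_of_two`/`trailing_zeros` arithmetic from the `allocate` hunk; the constant is illustrative, not vulkano's actual `MIN_NODE_SIZE`.

```rust
/// Illustrative stand-in for `BuddyAllocator::MIN_NODE_SIZE`.
const MIN_NODE_SIZE: u64 = 16;

/// Maps a requested size to the lowest order whose nodes can hold it,
/// mirroring the `allocate` hunk above.
fn min_order(size: u64) -> usize {
    // Round up to a power of two, never going below the minimum node size.
    let size = size.max(MIN_NODE_SIZE).next_power_of_two();
    // `size / MIN_NODE_SIZE` is a power of two; its trailing zeros are the order.
    (size / MIN_NODE_SIZE).trailing_zeros() as usize
}

fn main() {
    assert_eq!(min_order(1), 0); // clamped up to `MIN_NODE_SIZE`
    assert_eq!(min_order(16), 0);
    assert_eq!(min_order(17), 1); // rounds up to 32
    assert_eq!(min_order(64), 2);
}
```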

vulkano/src/memory/allocator/suballocator/bump.rs

@ -1,14 +1,15 @@
use super::{AllocationType, Region, Suballocation, Suballocator, SuballocatorError};
use super::{
are_blocks_on_same_page, AllocationType, Region, Suballocation, SuballocationNode,
SuballocationType, Suballocator, SuballocatorError,
};
use crate::{
memory::{
allocator::{
align_up, suballocator::are_blocks_on_same_page, AllocationHandle, DeviceLayout,
},
allocator::{align_up, AllocationHandle, DeviceLayout},
DeviceAlignment,
},
DeviceSize,
};
use std::cell::Cell;
use std::iter::FusedIterator;
/// A [suballocator] which can allocate dynamically, but can only free all allocations at once.
///
@ -53,8 +54,8 @@ use std::cell::Cell;
#[derive(Debug)]
pub struct BumpAllocator {
region: Region,
free_start: Cell<DeviceSize>,
prev_allocation_type: Cell<AllocationType>,
free_start: DeviceSize,
prev_allocation_type: AllocationType,
}
impl BumpAllocator {
@ -63,26 +64,46 @@ impl BumpAllocator {
/// [region]: Suballocator#regions
#[inline]
pub fn reset(&mut self) {
*self.free_start.get_mut() = 0;
*self.prev_allocation_type.get_mut() = AllocationType::Unknown;
self.free_start = 0;
self.prev_allocation_type = AllocationType::Unknown;
}
fn suballocation_node(&self, part: usize) -> SuballocationNode {
if part == 0 {
SuballocationNode {
offset: self.region.offset(),
size: self.free_start,
allocation_type: self.prev_allocation_type.into(),
}
} else {
debug_assert_eq!(part, 1);
SuballocationNode {
offset: self.region.offset() + self.free_start,
size: self.free_size(),
allocation_type: SuballocationType::Free,
}
}
}
}
unsafe impl Suballocator for BumpAllocator {
type Suballocations<'a> = Suballocations<'a>;
/// Creates a new `BumpAllocator` for the given [region].
///
/// [region]: Suballocator#regions
fn new(region: Region) -> Self {
BumpAllocator {
region,
free_start: Cell::new(0),
prev_allocation_type: Cell::new(AllocationType::Unknown),
free_start: 0,
prev_allocation_type: AllocationType::Unknown,
}
}
#[inline]
fn allocate(
&self,
&mut self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
@ -96,13 +117,13 @@ unsafe impl Suballocator for BumpAllocator {
// These can't overflow because suballocation offsets are bounded by the region, whose end
// can itself not exceed `DeviceLayout::MAX_SIZE`.
let prev_end = self.region.offset() + self.free_start.get();
let prev_end = self.region.offset() + self.free_start;
let mut offset = align_up(prev_end, alignment);
if buffer_image_granularity != DeviceAlignment::MIN
&& prev_end > 0
&& are_blocks_on_same_page(0, prev_end, offset, buffer_image_granularity)
&& has_granularity_conflict(self.prev_allocation_type.get(), allocation_type)
&& has_granularity_conflict(self.prev_allocation_type, allocation_type)
{
offset = align_up(offset, buffer_image_granularity);
}
@ -115,8 +136,8 @@ unsafe impl Suballocator for BumpAllocator {
return Err(SuballocatorError::OutOfRegionMemory);
}
self.free_start.set(free_start);
self.prev_allocation_type.set(allocation_type);
self.free_start = free_start;
self.prev_allocation_type = allocation_type;
Ok(Suballocation {
offset,
@ -127,17 +148,91 @@ unsafe impl Suballocator for BumpAllocator {
}
#[inline]
unsafe fn deallocate(&self, _suballocation: Suballocation) {
unsafe fn deallocate(&mut self, _suballocation: Suballocation) {
// such complex, very wow
}
#[inline]
fn free_size(&self) -> DeviceSize {
self.region.size() - self.free_start.get()
self.region.size() - self.free_start
}
#[inline]
fn cleanup(&mut self) {
self.reset();
}
#[inline]
fn suballocations(&self) -> Self::Suballocations<'_> {
let start = if self.free_start == 0 { 1 } else { 0 };
let end = if self.free_start == self.region.size() {
1
} else {
2
};
Suballocations {
allocator: self,
start,
end,
}
}
}
#[derive(Clone)]
pub struct Suballocations<'a> {
allocator: &'a BumpAllocator,
start: usize,
end: usize,
}
impl Iterator for Suballocations<'_> {
type Item = SuballocationNode;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.len() != 0 {
let node = self.allocator.suballocation_node(self.start);
self.start += 1;
Some(node)
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
impl DoubleEndedIterator for Suballocations<'_> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
if self.len() != 0 {
self.end -= 1;
let node = self.allocator.suballocation_node(self.end);
Some(node)
} else {
None
}
}
}
impl ExactSizeIterator for Suballocations<'_> {
#[inline]
fn len(&self) -> usize {
self.end - self.start
}
}
impl FusedIterator for Suballocations<'_> {}
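
To show the new iterator in action, here is a hedged usage sketch written in the style of this module's unit tests (it assumes the same `use super::*` scope the existing tests have). After a single allocation, `suballocations` yields exactly the two parts that `suballocation_node` constructs above: the bumped prefix and the free suffix.

```rust
#[test]
fn bump_suballocations_yield_used_then_free() {
    let mut allocator = BumpAllocator::new(Region::new(0, 1024).unwrap());
    let layout = DeviceLayout::from_size_alignment(256, 64).unwrap();

    allocator
        .allocate(layout, AllocationType::Linear, DeviceAlignment::MIN)
        .unwrap();

    let nodes: Vec<SuballocationNode> = allocator.suballocations().collect();

    // Part 0 is the allocated prefix, part 1 is the free remainder.
    assert_eq!(nodes.len(), 2);
    assert_eq!(nodes[0].allocation_type, SuballocationType::Linear);
    assert_eq!(nodes[1].allocation_type, SuballocationType::Free);
    assert_eq!(nodes[0].size + nodes[1].size, 1024);
}
```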

vulkano/src/memory/allocator/suballocator/free_list.rs

@ -1,18 +1,15 @@
use super::{AllocationType, Region, Suballocation, Suballocator, SuballocatorError};
use super::{
are_blocks_on_same_page, AllocationType, Region, Suballocation, SuballocationNode,
SuballocationType, Suballocator, SuballocatorError,
};
use crate::{
memory::{
allocator::{
align_up, suballocator::are_blocks_on_same_page, AllocationHandle, DeviceLayout,
},
allocator::{align_up, AllocationHandle, DeviceLayout},
is_aligned, DeviceAlignment,
},
DeviceSize,
};
use std::{
cell::{Cell, UnsafeCell},
cmp,
ptr::NonNull,
};
use std::{cmp, iter::FusedIterator, marker::PhantomData, ptr::NonNull};
/// A [suballocator] that uses the most generic [free-list].
///
@ -65,47 +62,49 @@ use std::{
pub struct FreeListAllocator {
region_offset: DeviceSize,
// Total memory remaining in the region.
free_size: Cell<DeviceSize>,
state: UnsafeCell<FreeListAllocatorState>,
free_size: DeviceSize,
suballocations: SuballocationList,
}
unsafe impl Send for FreeListAllocator {}
unsafe impl Suballocator for FreeListAllocator {
type Suballocations<'a> = Suballocations<'a>;
/// Creates a new `FreeListAllocator` for the given [region].
///
/// [region]: Suballocator#regions
fn new(region: Region) -> Self {
let free_size = Cell::new(region.size());
let node_allocator = slabbin::SlabAllocator::<SuballocationListNode>::new(32);
let mut free_list = Vec::with_capacity(32);
let root_ptr = node_allocator.allocate();
let root = SuballocationListNode {
prev: None,
next: None,
offset: region.offset(),
size: region.size(),
ty: SuballocationType::Free,
allocation_type: SuballocationType::Free,
};
unsafe { root_ptr.as_ptr().write(root) };
let mut free_list = Vec::with_capacity(32);
free_list.push(root_ptr);
let state = UnsafeCell::new(FreeListAllocatorState {
node_allocator,
let suballocations = SuballocationList {
head: root_ptr,
tail: root_ptr,
len: 1,
free_list,
});
node_allocator,
};
FreeListAllocator {
region_offset: region.offset(),
free_size,
state,
free_size: region.size(),
suballocations,
}
}
#[inline]
fn allocate(
&self,
&mut self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
@ -122,9 +121,8 @@ unsafe impl Suballocator for FreeListAllocator {
let size = layout.size();
let alignment = layout.alignment();
let state = unsafe { &mut *self.state.get() };
match state.free_list.last() {
match self.suballocations.free_list.last() {
Some(&last) if unsafe { (*last.as_ptr()).size } >= size => {
// We create a dummy node to compare against in the below binary search. The only
// fields of importance are `offset` and `size`. It is paramount that we set
@ -136,7 +134,7 @@ unsafe impl Suballocator for FreeListAllocator {
next: None,
offset: 0,
size,
ty: SuballocationType::Unknown,
allocation_type: SuballocationType::Unknown,
};
// This is almost exclusively going to return `Err`, but that's expected: we are
@ -149,11 +147,14 @@ unsafe impl Suballocator for FreeListAllocator {
//
// Note that `index == free_list.len()` can't be because we checked that the
// free-list contains a suballocation that is big enough.
let (Ok(index) | Err(index)) = state
let (Ok(index) | Err(index)) = self
.suballocations
.free_list
.binary_search_by_key(&dummy_node, |&ptr| unsafe { *ptr.as_ptr() });
for (index, &node_ptr) in state.free_list.iter().enumerate().skip(index) {
for (index, &node_ptr) in
self.suballocations.free_list.iter().enumerate().skip(index)
{
let node = unsafe { *node_ptr.as_ptr() };
// This can't overflow because suballocation offsets are bounded by the region,
@ -171,7 +172,7 @@ unsafe impl Suballocator for FreeListAllocator {
prev.size,
offset,
buffer_image_granularity,
) && has_granularity_conflict(prev.ty, allocation_type)
) && has_granularity_conflict(prev.allocation_type, allocation_type)
{
// This is overflow-safe for the same reason as above.
offset = align_up(offset, buffer_image_granularity);
@ -187,19 +188,19 @@ unsafe impl Suballocator for FreeListAllocator {
//
// `node.offset + node.size` can't overflow for the same reason as above.
if offset + size <= node.offset + node.size {
state.free_list.remove(index);
self.suballocations.free_list.remove(index);
// SAFETY:
// - `node` is free.
// - `offset` is that of `node`, possibly rounded up.
// - We checked that `offset + size` falls within `node`.
unsafe { state.split(node_ptr, offset, size) };
unsafe { self.suballocations.split(node_ptr, offset, size) };
unsafe { (*node_ptr.as_ptr()).ty = allocation_type.into() };
unsafe { (*node_ptr.as_ptr()).allocation_type = allocation_type.into() };
// This can't overflow because suballocation sizes in the free-list are
// constrained by the remaining size of the region.
self.free_size.set(self.free_size.get() - size);
self.free_size -= size;
return Ok(Suballocation {
offset,
@ -223,7 +224,7 @@ unsafe impl Suballocator for FreeListAllocator {
}
#[inline]
unsafe fn deallocate(&self, suballocation: Suballocation) {
unsafe fn deallocate(&mut self, suballocation: Suballocation) {
let node_ptr = suballocation
.handle
.as_ptr()
@ -235,44 +236,53 @@ unsafe impl Suballocator for FreeListAllocator {
let node_ptr = unsafe { NonNull::new_unchecked(node_ptr) };
let node = unsafe { *node_ptr.as_ptr() };
debug_assert!(node.ty != SuballocationType::Free);
debug_assert_ne!(node.allocation_type, SuballocationType::Free);
// Suballocation sizes are constrained by the size of the region, so they can't possibly
// overflow when added up.
self.free_size.set(self.free_size.get() + node.size);
self.free_size += node.size;
unsafe { (*node_ptr.as_ptr()).ty = SuballocationType::Free };
unsafe { (*node_ptr.as_ptr()).allocation_type = SuballocationType::Free };
let state = unsafe { &mut *self.state.get() };
unsafe { state.coalesce(node_ptr) };
unsafe { state.deallocate(node_ptr) };
unsafe { self.suballocations.coalesce(node_ptr) };
unsafe { self.suballocations.deallocate(node_ptr) };
}
#[inline]
fn free_size(&self) -> DeviceSize {
self.free_size.get()
self.free_size
}
#[inline]
fn cleanup(&mut self) {}
#[inline]
fn suballocations(&self) -> Self::Suballocations<'_> {
self.suballocations.iter()
}
}
#[derive(Debug)]
struct FreeListAllocatorState {
node_allocator: slabbin::SlabAllocator<SuballocationListNode>,
struct SuballocationList {
head: NonNull<SuballocationListNode>,
tail: NonNull<SuballocationListNode>,
len: usize,
// Free suballocations sorted by size in ascending order. This means we can always find a
// best-fit in *O*(log(*n*)) time in the worst case, and iterating in order is very efficient.
free_list: Vec<NonNull<SuballocationListNode>>,
node_allocator: slabbin::SlabAllocator<SuballocationListNode>,
}
unsafe impl Send for SuballocationList {}
unsafe impl Sync for SuballocationList {}
#[derive(Clone, Copy, Debug)]
struct SuballocationListNode {
prev: Option<NonNull<Self>>,
next: Option<NonNull<Self>>,
offset: DeviceSize,
size: DeviceSize,
ty: SuballocationType,
allocation_type: SuballocationType,
}
impl PartialEq for SuballocationListNode {
@ -300,48 +310,7 @@ impl Ord for SuballocationListNode {
}
}
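
The `Ord` body is elided by this hunk; what the free-list comment below and the dummy-node trick above imply is an ordering by size first, then offset. A hedged sketch of that comparator:

```rust
use std::cmp::Ordering;

// Hedged sketch of the ordering the elided `Ord` impl provides: size first, so
// a binary search by size lands on a best-fit, then offset as a tiebreaker so
// the order is total (which `binary_search_by_key` requires) and so a zeroed
// dummy offset finds the lowest-offset node of a given size.
fn node_order(a: &SuballocationListNode, b: &SuballocationListNode) -> Ordering {
    a.size.cmp(&b.size).then(a.offset.cmp(&b.offset))
}
```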
/// Tells us if a suballocation is free, and if not, whether it is linear or not. This is needed in
/// order to be able to respect the buffer-image granularity.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum SuballocationType {
Unknown,
Linear,
NonLinear,
Free,
}
impl From<AllocationType> for SuballocationType {
fn from(ty: AllocationType) -> Self {
match ty {
AllocationType::Unknown => SuballocationType::Unknown,
AllocationType::Linear => SuballocationType::Linear,
AllocationType::NonLinear => SuballocationType::NonLinear,
}
}
}
impl FreeListAllocatorState {
/// Removes the target suballocation from the free-list.
///
/// # Safety
///
/// - `node_ptr` must refer to a currently free suballocation of `self`.
unsafe fn allocate(&mut self, node_ptr: NonNull<SuballocationListNode>) {
debug_assert!(self.free_list.contains(&node_ptr));
let node = unsafe { *node_ptr.as_ptr() };
match self
.free_list
.binary_search_by_key(&node, |&ptr| unsafe { *ptr.as_ptr() })
{
Ok(index) => {
self.free_list.remove(index);
}
Err(_) => unreachable!(),
}
}
impl SuballocationList {
/// Fits a suballocation inside the target one, splitting the target at the ends if required.
///
/// # Safety
@ -356,7 +325,7 @@ impl FreeListAllocatorState {
) {
let node = unsafe { *node_ptr.as_ptr() };
debug_assert!(node.ty == SuballocationType::Free);
debug_assert_eq!(node.allocation_type, SuballocationType::Free);
debug_assert!(offset >= node.offset);
debug_assert!(offset + size <= node.offset + node.size);
@ -372,7 +341,7 @@ impl FreeListAllocatorState {
next: Some(node_ptr),
offset: node.offset,
size: padding_front,
ty: SuballocationType::Free,
allocation_type: SuballocationType::Free,
};
unsafe { padding_ptr.as_ptr().write(padding) };
@ -387,6 +356,12 @@ impl FreeListAllocatorState {
// of the padding, so this can't overflow.
unsafe { (*node_ptr.as_ptr()).size -= padding.size };
if node_ptr == self.head {
self.head = padding_ptr;
}
self.len += 1;
// SAFETY: We just created this suballocation, so there's no way that it was
// deallocated already.
unsafe { self.deallocate(padding_ptr) };
@ -399,7 +374,7 @@ impl FreeListAllocatorState {
next: node.next,
offset: offset + size,
size: padding_back,
ty: SuballocationType::Free,
allocation_type: SuballocationType::Free,
};
unsafe { padding_ptr.as_ptr().write(padding) };
@ -411,6 +386,12 @@ impl FreeListAllocatorState {
// This is overflow-safe for the same reason as above.
unsafe { (*node_ptr.as_ptr()).size -= padding.size };
if node_ptr == self.tail {
self.tail = padding_ptr;
}
self.len += 1;
// SAFETY: Same as above.
unsafe { self.deallocate(padding_ptr) };
}
@ -439,12 +420,12 @@ impl FreeListAllocatorState {
unsafe fn coalesce(&mut self, node_ptr: NonNull<SuballocationListNode>) {
let node = unsafe { *node_ptr.as_ptr() };
debug_assert!(node.ty == SuballocationType::Free);
debug_assert_eq!(node.allocation_type, SuballocationType::Free);
if let Some(prev_ptr) = node.prev {
let prev = unsafe { *prev_ptr.as_ptr() };
if prev.ty == SuballocationType::Free {
if prev.allocation_type == SuballocationType::Free {
// SAFETY: We checked that the suballocation is free.
self.allocate(prev_ptr);
@ -458,11 +439,18 @@ impl FreeListAllocatorState {
unsafe { (*prev_ptr.as_ptr()).next = Some(node_ptr) };
}
if prev_ptr == self.head {
self.head = node_ptr;
}
self.len -= 1;
// SAFETY:
// - The suballocation is free.
// - The suballocation was removed from the free-list.
// - The next suballocation and possibly a previous suballocation have been updated
// such that they no longer reference the suballocation.
// - The head no longer points to the suballocation if it used to.
// All of these conditions combined guarantee that `prev_ptr` cannot be used again.
unsafe { self.node_allocator.deallocate(prev_ptr) };
}
@ -471,7 +459,7 @@ impl FreeListAllocatorState {
if let Some(next_ptr) = node.next {
let next = unsafe { *next_ptr.as_ptr() };
if next.ty == SuballocationType::Free {
if next.allocation_type == SuballocationType::Free {
// SAFETY: Same as above.
self.allocate(next_ptr);
@ -483,9 +471,123 @@ impl FreeListAllocatorState {
unsafe { (*next_ptr.as_ptr()).prev = Some(node_ptr) };
}
if next_ptr == self.tail {
self.tail = node_ptr;
}
self.len -= 1;
// SAFETY: Same as above.
unsafe { self.node_allocator.deallocate(next_ptr) };
}
}
}
/// Removes the target suballocation from the free-list.
///
/// # Safety
///
/// - `node_ptr` must refer to a currently free suballocation of `self`.
unsafe fn allocate(&mut self, node_ptr: NonNull<SuballocationListNode>) {
debug_assert!(self.free_list.contains(&node_ptr));
let node = unsafe { *node_ptr.as_ptr() };
match self
.free_list
.binary_search_by_key(&node, |&ptr| unsafe { *ptr.as_ptr() })
{
Ok(index) => {
self.free_list.remove(index);
}
Err(_) => unreachable!(),
}
}
fn iter(&self) -> Suballocations<'_> {
Suballocations {
head: Some(self.head),
tail: Some(self.tail),
len: self.len,
marker: PhantomData,
}
}
}
#[derive(Clone)]
pub struct Suballocations<'a> {
head: Option<NonNull<SuballocationListNode>>,
tail: Option<NonNull<SuballocationListNode>>,
len: usize,
marker: PhantomData<&'a SuballocationList>,
}
unsafe impl Send for Suballocations<'_> {}
unsafe impl Sync for Suballocations<'_> {}
impl Iterator for Suballocations<'_> {
type Item = SuballocationNode;
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.len != 0 {
if let Some(head) = self.head {
let head = unsafe { *head.as_ptr() };
self.head = head.next;
self.len -= 1;
Some(SuballocationNode {
offset: head.offset,
size: head.size,
allocation_type: head.allocation_type,
})
} else {
None
}
} else {
None
}
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len, Some(self.len))
}
#[inline]
fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
}
impl DoubleEndedIterator for Suballocations<'_> {
#[inline]
fn next_back(&mut self) -> Option<Self::Item> {
if self.len != 0 {
if let Some(tail) = self.tail {
let tail = unsafe { *tail.as_ptr() };
self.tail = tail.prev;
self.len -= 1;
Some(SuballocationNode {
offset: tail.offset,
size: tail.size,
allocation_type: tail.allocation_type,
})
} else {
None
}
} else {
None
}
}
}
impl ExactSizeIterator for Suballocations<'_> {
#[inline]
fn len(&self) -> usize {
self.len
}
}
impl FusedIterator for Suballocations<'_> {}
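
The iterator also makes the coalescing behavior observable. A hedged sketch, again in the style of the module's existing tests (assuming the same `use super::*` scope):

```rust
#[test]
fn free_list_suballocations_track_coalescing() {
    let mut allocator = FreeListAllocator::new(Region::new(0, 1024).unwrap());
    let layout = DeviceLayout::from_size_alignment(256, 1).unwrap();

    let a = allocator
        .allocate(layout, AllocationType::Linear, DeviceAlignment::MIN)
        .unwrap();
    let b = allocator
        .allocate(layout, AllocationType::Linear, DeviceAlignment::MIN)
        .unwrap();

    // [used a][used b][free remainder]
    assert_eq!(allocator.suballocations().len(), 3);

    unsafe { allocator.deallocate(a) };
    // `a` is now free but can't coalesce: its only neighbor `b` is still allocated.
    assert_eq!(allocator.suballocations().len(), 3);

    unsafe { allocator.deallocate(b) };
    // `b` coalesces with the free nodes on both sides, leaving one free node.
    assert_eq!(allocator.suballocations().len(), 1);
    assert_eq!(
        allocator.suballocations().next().unwrap().allocation_type,
        SuballocationType::Free,
    );
}
```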

vulkano/src/memory/allocator/suballocator/mod.rs

@ -78,6 +78,15 @@ mod free_list;
/// [page]: super#pages
/// [buffer-image granularity]: super#buffer-image-granularity
pub unsafe trait Suballocator {
/// The type of iterator returned by [`suballocations`].
///
/// [`suballocations`]: Self::suballocations
type Suballocations<'a>: Iterator<Item = SuballocationNode>
+ DoubleEndedIterator
+ ExactSizeIterator
where
Self: Sized + 'a;
/// Creates a new suballocator for the given [region].
///
/// [region]: Self#regions
@ -115,7 +124,7 @@ pub unsafe trait Suballocator {
/// [buffer-image granularity]: super#buffer-image-granularity
/// [`DeviceMemory`]: crate::memory::DeviceMemory
fn allocate(
&self,
&mut self,
layout: DeviceLayout,
allocation_type: AllocationType,
buffer_image_granularity: DeviceAlignment,
@ -126,7 +135,7 @@ pub unsafe trait Suballocator {
/// # Safety
///
/// - `suballocation` must refer to a **currently allocated** suballocation of `self`.
unsafe fn deallocate(&self, suballocation: Suballocation);
unsafe fn deallocate(&mut self, suballocation: Suballocation);
/// Returns the total amount of free space that is left in the [region].
///
@ -137,6 +146,11 @@ pub unsafe trait Suballocator {
///
/// There must be no current allocations as they might get freed.
fn cleanup(&mut self);
/// Returns an iterator over the current suballocations.
fn suballocations(&self) -> Self::Suballocations<'_>
where
Self: Sized;
}
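
Because `suballocations` carries a `Self: Sized` bound, the associated iterator type can't be used through `dyn Suballocator`; the method is meant for generic code. A hedged sketch of the kind of helper this enables (a hypothetical function, not part of this commit; note that `BuddyAllocator`'s iterator is still `todo!()` here, so it only makes sense for the list-based allocators):

```rust
// Hypothetical helper: reports the largest contiguous free block, a common
// fragmentation metric, by scanning the suballocation nodes.
fn largest_free_block<S: Suballocator>(allocator: &S) -> DeviceSize {
    allocator
        .suballocations()
        .filter(|node| node.allocation_type == SuballocationType::Free)
        .map(|node| node.size)
        .max()
        .unwrap_or(0)
}
```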
impl Debug for dyn Suballocator {
@ -299,6 +313,59 @@ impl Display for SuballocatorError {
}
}
/// A node within a [suballocator]'s list/tree of suballocations.
///
/// [suballocator]: Suballocator
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct SuballocationNode {
/// The **absolute** offset within the [region]. That is, it already includes the region's
/// offset and is **not relative to the beginning of the region**.
///
/// [region]: Suballocator#regions
pub offset: DeviceSize,
/// The size of the allocation.
pub size: DeviceSize,
/// Tells us if the allocation is free, and if not, what type of resources can be bound to it.
pub allocation_type: SuballocationType,
}
/// Tells us if an allocation within a [suballocator]'s list/tree of suballocations is free, and if
/// not, what type of resources can be bound to it. The suballocator needs to keep track of this in
/// order to be able to respect the buffer-image granularity.
///
/// [suballocator]: Suballocator
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum SuballocationType {
/// The type of resource is unknown; it might be either linear or non-linear. What this means
/// is that allocations created with this type must always be aligned to the buffer-image
/// granularity.
Unknown = 0,
/// The resource is linear, e.g. buffers, linear images. A linear allocation following another
/// linear allocation never needs to be aligned to the buffer-image granularity.
Linear = 1,
/// The resource is non-linear, e.g. optimal images. A non-linear allocation following another
/// non-linear allocation never needs to be aligned to the buffer-image granularity.
NonLinear = 2,
/// The allocation is free. It can take on any of the allocation types once allocated.
Free = 3,
}
impl From<AllocationType> for SuballocationType {
#[inline]
fn from(ty: AllocationType) -> Self {
match ty {
AllocationType::Unknown => SuballocationType::Unknown,
AllocationType::Linear => SuballocationType::Linear,
AllocationType::NonLinear => SuballocationType::NonLinear,
}
}
}
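
The `has_granularity_conflict` helper called throughout the hunks above is not part of this diff. Given the variant docs just added (free never conflicts, unknown might be non-linear, linear and non-linear conflict with each other), its implied behavior is roughly the following sketch, which is an assumption rather than the code from this commit:

```rust
// Hedged sketch: not the definition from this commit, just the behavior
// implied by the `SuballocationType` docs above.
fn has_granularity_conflict(prev_ty: SuballocationType, ty: AllocationType) -> bool {
    if prev_ty == SuballocationType::Free {
        // A free neighbor never forces buffer-image-granularity alignment.
        false
    } else if prev_ty == SuballocationType::Unknown {
        // Unknown might be non-linear, so always align conservatively.
        true
    } else {
        // Linear next to non-linear (in either order) requires alignment.
        prev_ty != SuballocationType::from(ty)
    }
}
```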
/// Checks if resources A and B share a page.
///
/// > **Note**: Assumes `a_offset + a_size > 0` and `a_offset + a_size <= b_offset`.
@ -367,7 +434,7 @@ mod tests {
}
});
let allocator = allocator.into_inner();
let mut allocator = allocator.into_inner();
assert!(allocator
.allocate(DUMMY_LAYOUT, AllocationType::Unknown, DeviceAlignment::MIN)
@ -394,7 +461,7 @@ mod tests {
const REGION_SIZE: DeviceSize = 10 * 256;
const LAYOUT: DeviceLayout = unwrap(DeviceLayout::from_size_alignment(1, 256));
let allocator = FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocator = FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocs = Vec::with_capacity(10);
for _ in 0..10 {
@ -420,7 +487,7 @@ mod tests {
const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(16));
const REGION_SIZE: DeviceSize = 2 * GRANULARITY.as_devicesize();
let allocator = FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocator = FreeListAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut linear_allocs = Vec::with_capacity(REGION_SIZE as usize / 2);
let mut nonlinear_allocs = Vec::with_capacity(REGION_SIZE as usize / 2);
@ -479,7 +546,7 @@ mod tests {
const MAX_ORDER: usize = 10;
const REGION_SIZE: DeviceSize = BuddyAllocator::MIN_NODE_SIZE << MAX_ORDER;
let allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocs = Vec::with_capacity(1 << MAX_ORDER);
for order in 0..=MAX_ORDER {
@ -541,7 +608,7 @@ mod tests {
fn buddy_allocator_respects_alignment() {
const REGION_SIZE: DeviceSize = 4096;
let allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
{
let layout = DeviceLayout::from_size_alignment(1, 4096).unwrap();
@ -608,7 +675,7 @@ mod tests {
const GRANULARITY: DeviceAlignment = unwrap(DeviceAlignment::new(256));
const REGION_SIZE: DeviceSize = 2 * GRANULARITY.as_devicesize();
let allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
let mut allocator = BuddyAllocator::new(Region::new(0, REGION_SIZE).unwrap());
{
const ALLOCATIONS: DeviceSize = REGION_SIZE / BuddyAllocator::MIN_NODE_SIZE;