From 6f86591b8795fc45aad3dd59863c3b3afd246506 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?John=20K=C3=A5re=20Alsaker?=
Date: Mon, 14 Aug 2023 21:29:19 +0200
Subject: [PATCH] Address comments

---
 compiler/rustc_arena/src/lib.rs | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 108476a499f..e45b7c154fa 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -366,16 +366,18 @@ unsafe impl<T: Send> Send for TypedArena<T> {}
 
 #[inline(always)]
 fn align_down(val: usize, align: usize) -> usize {
-    assert!(align.is_power_of_two());
+    debug_assert!(align.is_power_of_two());
     val & !(align - 1)
 }
 
 #[inline(always)]
-fn align(val: usize, align: usize) -> usize {
-    assert!(align.is_power_of_two());
+fn align_up(val: usize, align: usize) -> usize {
+    debug_assert!(align.is_power_of_two());
     (val + align - 1) & !(align - 1)
 }
 
+// Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to pointers
+// so that the alignment code can be optimized away.
 const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();
 
 /// An arena that can hold objects of multiple different types that impl `Copy`
@@ -390,6 +392,8 @@ pub struct DroplessArena {
     /// start. (This is slightly simpler and faster than allocating upwards,
     /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
     /// When this pointer crosses the start pointer, a new chunk is allocated.
+    ///
+    /// This is kept aligned to DROPLESS_ALIGNMENT.
     end: Cell<*mut u8>,
 
     /// A vector of arena chunks.
@@ -433,13 +437,16 @@ impl DroplessArena {
             // Also ensure that this chunk can fit `additional`.
             new_cap = cmp::max(additional, new_cap);
 
-            let mut chunk = ArenaChunk::new(align(new_cap, PAGE));
+            let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
             self.start.set(chunk.start());
 
             // Align the end to DROPLESS_ALIGNMENT
             let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
-            // Make sure we don't go past `start`
-            let end = cmp::max(chunk.start().addr(), end);
+
+            // Make sure we don't go past `start`. This should not happen since the allocation
+            // should be at least DROPLESS_ALIGNMENT - 1 bytes.
+            debug_assert!(chunk.start().addr() <= end);
+
             self.end.set(chunk.end().with_addr(end));
 
             chunks.push(chunk);
@@ -469,7 +476,7 @@ impl DroplessArena {
         let end = old_end.addr();
 
         // Align allocated bytes so that `self.end` stays aligned to DROPLESS_ALIGNMENT
-        let bytes = align(layout.size(), DROPLESS_ALIGNMENT);
+        let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
 
         // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT
         unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
@@ -477,6 +484,8 @@ impl DroplessArena {
         let new_end = align_down(end.checked_sub(bytes)?, layout.align());
         if start <= new_end {
            let new_end = old_end.with_addr(new_end);
+            // `new_end` is aligned to DROPLESS_ALIGNMENT because `align_down` preserves
+            // alignment, as both `end` and `bytes` are already aligned to DROPLESS_ALIGNMENT.
             self.end.set(new_end);
             Some(new_end)
         } else {
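
Note (not part of the patch): below is a minimal standalone sketch of the renamed helpers
and the two alignment invariants the new comments rely on. The helper bodies and the value
of DROPLESS_ALIGNMENT are taken from the patched file; the `main` function and all example
values are illustrative assumptions, not rustc code.

// Standalone sketch, runnable with `rustc` as-is.
use std::mem;

const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>();

/// Round `val` down to a multiple of `align` (a power of two).
fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    // For a power of two, `!(align - 1)` is a mask that clears the low bits.
    val & !(align - 1)
}

/// Round `val` up to a multiple of `align` (a power of two).
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    // Adding `align - 1` first turns the round-down into a round-up.
    (val + align - 1) & !(align - 1)
}

fn main() {
    assert_eq!(align_down(0x1007, 8), 0x1000);
    assert_eq!(align_up(0x1001, 8), 0x1008);
    // Already-aligned values are unchanged in both directions.
    assert_eq!(align_down(0x1000, 8), 0x1000);
    assert_eq!(align_up(0x1000, 8), 0x1000);

    // Invariant behind the new debug_assert! in `grow`: rounding a chunk's end
    // down to DROPLESS_ALIGNMENT moves it by at most DROPLESS_ALIGNMENT - 1
    // bytes, so it cannot cross `start` for any chunk at least that large.
    let end = 0x2003usize;
    assert!(end - align_down(end, DROPLESS_ALIGNMENT) < DROPLESS_ALIGNMENT);

    // Invariant behind the comment in the allocation fast path: if `end` and
    // `bytes` are both multiples of DROPLESS_ALIGNMENT, then
    // `align_down(end - bytes, layout_align)` is too, whether `layout_align`
    // is smaller (the value is already aligned, so it is unchanged) or larger
    // (a multiple of a larger power of two is a multiple of a smaller one).
    let (end, bytes) = (0x4000usize, align_up(13, DROPLESS_ALIGNMENT));
    for layout_align in [1usize, 2, 4, 8, 16, 32] {
        let new_end = align_down(end - bytes, layout_align);
        assert_eq!(new_end % DROPLESS_ALIGNMENT, 0);
    }
}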