Mirror of https://github.com/rust-lang/rust.git, synced 2024-11-21 14:23:45 +00:00
Auto merge of #121421 - saethlin:smarter-mono, r=oli-obk
Avoid lowering code under dead SwitchInt targets

The objective of this PR is to detect and eliminate code which is guarded by an `if false`, even if that `false` is a constant which is not known until monomorphization, or is `intrinsics::debug_assertions()`. The effect of this is that we generate no LLVM IR for the standard library's unsafe preconditions when they are compiled in a build where they should be immediately optimized out.

This mono-time optimization ensures that builds which disable debug assertions do not grow a linkage requirement against `core`, which compiler-builtins currently needs: https://github.com/rust-lang/rust/issues/121552

This revives the codegen side of https://github.com/rust-lang/rust/pull/91222, as planned in https://github.com/rust-lang/rust/issues/120848.
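For illustration, here is a minimal sketch of the kind of code whose dead arm this change stops lowering. It is not taken from this PR; the trait and function names are made up, and the shape mirrors the codegen test added below.

```rust
// A branch guarded by a constant that is only known at monomorphization time.
// Before this change, the dead arm was still lowered to LLVM IR at
// -Copt-level=0; with it, the dead SwitchInt target becomes a bare
// `unreachable` block and `expensive_path::<u8>` is never monomorphized.
trait SizeClass {
    const IS_BIG: bool;
}

impl<T> SizeClass for T {
    const IS_BIG: bool = std::mem::size_of::<T>() > 16;
}

fn process<T>() {
    if <T as SizeClass>::IS_BIG {
        expensive_path::<T>();
    } else {
        cheap_path::<T>();
    }
}

#[inline(never)]
fn expensive_path<T>() {}

#[inline(never)]
fn cheap_path<T>() {}

fn main() {
    // For T = u8, `IS_BIG` is false, so only `cheap_path::<u8>` is needed.
    process::<u8>();
}
```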
This commit is contained in: commit 5a6c1aa2bc
@@ -1237,6 +1237,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
+    pub fn codegen_block_as_unreachable(&mut self, bb: mir::BasicBlock) {
+        let llbb = match self.try_llbb(bb) {
+            Some(llbb) => llbb,
+            None => return,
+        };
+        let bx = &mut Bx::build(self.cx, llbb);
+        debug!("codegen_block_as_unreachable({:?})", bb);
+        bx.unreachable();
+    }
+
     fn codegen_terminator(
         &mut self,
         bx: &mut Bx,
@@ -256,13 +256,22 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // Apply debuginfo to the newly allocated locals.
     fx.debug_introduce_locals(&mut start_bx);
 
+    let reachable_blocks = mir.reachable_blocks_in_mono(cx.tcx(), instance);
+
     // The builders will be created separately for each basic block at `codegen_block`.
     // So drop the builder of `start_llbb` to avoid having two at the same time.
     drop(start_bx);
 
     // Codegen the body of each block using reverse postorder
     for (bb, _) in traversal::reverse_postorder(mir) {
-        fx.codegen_block(bb);
+        if reachable_blocks.contains(bb) {
+            fx.codegen_block(bb);
+        } else {
+            // This may have references to things we didn't monomorphize, so we
+            // don't actually codegen the body. We still create the block so
+            // terminators in other blocks can reference it without worry.
+            fx.codegen_block_as_unreachable(bb);
+        }
     }
 }
@@ -10,7 +10,7 @@ use crate::ty::print::{pretty_print_const, with_no_trimmed_paths};
 use crate::ty::print::{FmtPrinter, Printer};
 use crate::ty::visit::TypeVisitableExt;
 use crate::ty::{self, List, Ty, TyCtxt};
-use crate::ty::{AdtDef, InstanceDef, UserTypeAnnotationIndex};
+use crate::ty::{AdtDef, Instance, InstanceDef, UserTypeAnnotationIndex};
 use crate::ty::{GenericArg, GenericArgsRef};
 
 use rustc_data_structures::captures::Captures;
@@ -27,6 +27,8 @@ pub use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_data_structures::graph::dominators::Dominators;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_index::bit_set::BitSet;
 use rustc_index::{Idx, IndexSlice, IndexVec};
 use rustc_serialize::{Decodable, Encodable};
 use rustc_span::symbol::Symbol;
@@ -640,6 +642,129 @@ impl<'tcx> Body<'tcx> {
         self.injection_phase.is_some()
     }
 
+    /// Finds which basic blocks are actually reachable for a specific
+    /// monomorphization of this body.
+    ///
+    /// This is allowed to have false positives; just because this says a block
+    /// is reachable doesn't mean that's necessarily true. It's thus always
+    /// legal for this to return a filled set.
+    ///
+    /// Regardless, the [`BitSet::domain_size`] of the returned set will always
+    /// exactly match the number of blocks in the body so that `contains`
+    /// checks can be done without worrying about panicking.
+    ///
+    /// This is mostly useful because it lets us skip lowering the `false` side
+    /// of `if <T as Trait>::CONST`, as well as `intrinsics::debug_assertions`.
+    pub fn reachable_blocks_in_mono(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        instance: Instance<'tcx>,
+    ) -> BitSet<BasicBlock> {
+        let mut set = BitSet::new_empty(self.basic_blocks.len());
+        self.reachable_blocks_in_mono_from(tcx, instance, &mut set, START_BLOCK);
+        set
+    }
+
+    fn reachable_blocks_in_mono_from(
+        &self,
+        tcx: TyCtxt<'tcx>,
+        instance: Instance<'tcx>,
+        set: &mut BitSet<BasicBlock>,
+        bb: BasicBlock,
+    ) {
+        if !set.insert(bb) {
+            return;
+        }
+
+        let data = &self.basic_blocks[bb];
+
+        if let Some((bits, targets)) = Self::try_const_mono_switchint(tcx, instance, data) {
+            let target = targets.target_for_value(bits);
+            ensure_sufficient_stack(|| {
+                self.reachable_blocks_in_mono_from(tcx, instance, set, target)
+            });
+            return;
+        }
+
+        for target in data.terminator().successors() {
+            ensure_sufficient_stack(|| {
+                self.reachable_blocks_in_mono_from(tcx, instance, set, target)
+            });
+        }
+    }
+
+    /// If this basic block ends with a [`TerminatorKind::SwitchInt`] for which we can evaluate the
+    /// discriminant in monomorphization, we return the discriminant bits and the
+    /// [`SwitchTargets`], just so the caller doesn't also have to match on the terminator.
+    fn try_const_mono_switchint<'a>(
+        tcx: TyCtxt<'tcx>,
+        instance: Instance<'tcx>,
+        block: &'a BasicBlockData<'tcx>,
+    ) -> Option<(u128, &'a SwitchTargets)> {
+        // There are two places here we need to evaluate a constant.
+        let eval_mono_const = |constant: &ConstOperand<'tcx>| {
+            let env = ty::ParamEnv::reveal_all();
+            let mono_literal = instance.instantiate_mir_and_normalize_erasing_regions(
+                tcx,
+                env,
+                crate::ty::EarlyBinder::bind(constant.const_),
+            );
+            let Some(bits) = mono_literal.try_eval_bits(tcx, env) else {
+                bug!("Couldn't evaluate constant {:?} in mono {:?}", constant, instance);
+            };
+            bits
+        };
+
+        let TerminatorKind::SwitchInt { discr, targets } = &block.terminator().kind else {
+            return None;
+        };
+
+        // If this is a SwitchInt(const _), then we can just evaluate the constant and return.
+        let discr = match discr {
+            Operand::Constant(constant) => {
+                let bits = eval_mono_const(constant);
+                return Some((bits, targets));
+            }
+            Operand::Move(place) | Operand::Copy(place) => place,
+        };
+
+        // MIR for `if false` actually looks like this:
+        // _1 = const _
+        // SwitchInt(_1)
+        //
+        // And MIR for if intrinsics::debug_assertions() looks like this:
+        // _1 = cfg!(debug_assertions)
+        // SwitchInt(_1)
+        //
+        // So we're going to try to recognize this pattern.
+        //
+        // If we have a SwitchInt on a non-const place, we find the most recent statement that
+        // isn't a storage marker. If that statement is an assignment of a const to our
+        // discriminant place, we evaluate and return the const, as if we've const-propagated it
+        // into the SwitchInt.
+
+        let last_stmt = block.statements.iter().rev().find(|stmt| {
+            !matches!(stmt.kind, StatementKind::StorageDead(_) | StatementKind::StorageLive(_))
+        })?;
+
+        let (place, rvalue) = last_stmt.kind.as_assign()?;
+
+        if discr != place {
+            return None;
+        }
+
+        match rvalue {
+            Rvalue::NullaryOp(NullOp::UbCheck(_), _) => {
+                Some((tcx.sess.opts.debug_assertions as u128, targets))
+            }
+            Rvalue::Use(Operand::Constant(constant)) => {
+                let bits = eval_mono_const(constant);
+                Some((bits, targets))
+            }
+            _ => None,
+        }
+    }
+
     /// For a `Location` in this scope, determine what the "caller location" at that point is. This
     /// is interesting because of inlining: the `#[track_caller]` attribute of inlined functions
     /// must be honored. Falls back to the `tracked_caller` value for `#[track_caller]` functions,
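As a side note, the reachability walk above can be read as a plain depth-first search that follows only the taken edge of a switch whose discriminant is already known. The following is a self-contained sketch of that idea using only the standard library; the names (`Terminator`, `Block`, `reachable_from`) are invented stand-ins for the MIR structures, and it omits the constant evaluation and the `ensure_sufficient_stack` guard that the real code needs.

```rust
use std::collections::HashSet;

// Stand-in for a basic block's terminator: either an unconditional set of
// successors, or a switch whose value is already known (the analogue of a
// SwitchInt on a monomorphization-time constant).
enum Terminator {
    Goto { successors: Vec<usize> },
    ConstSwitch { value: u128, targets: Vec<(u128, usize)>, otherwise: usize },
}

struct Block {
    terminator: Terminator,
}

// Analogue of `reachable_blocks_in_mono`: collect every block reachable from
// `start`, following only the taken target of a known switch.
fn reachable_from(blocks: &[Block], start: usize) -> HashSet<usize> {
    let mut set = HashSet::new();
    visit(blocks, start, &mut set);
    set
}

fn visit(blocks: &[Block], bb: usize, set: &mut HashSet<usize>) {
    if !set.insert(bb) {
        return; // already visited
    }
    match &blocks[bb].terminator {
        Terminator::ConstSwitch { value, targets, otherwise } => {
            // Only the target selected by the known value is reachable.
            let target = targets
                .iter()
                .find(|(v, _)| v == value)
                .map(|&(_, t)| t)
                .unwrap_or(*otherwise);
            visit(blocks, target, set);
        }
        Terminator::Goto { successors } => {
            for &target in successors {
                visit(blocks, target, set);
            }
        }
    }
}

fn main() {
    // bb0 switches on a value known to be 0, so bb1 (the "value == 1" arm) is dead.
    let blocks = vec![
        Block { terminator: Terminator::ConstSwitch { value: 0, targets: vec![(1, 1)], otherwise: 2 } },
        Block { terminator: Terminator::Goto { successors: vec![3] } },
        Block { terminator: Terminator::Goto { successors: vec![3] } },
        Block { terminator: Terminator::Goto { successors: vec![] } },
    ];
    let reachable = reachable_from(&blocks, 0);
    assert!(reachable.contains(&0) && reachable.contains(&2) && reachable.contains(&3));
    assert!(!reachable.contains(&1));
}
```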
@@ -1,5 +1,3 @@
-use rustc_index::bit_set::BitSet;
-
 use super::*;
 
 /// Preorder traversal of a graph.
tests/codegen/precondition-checks.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
+//@ compile-flags: -Cno-prepopulate-passes -Copt-level=0 -Cdebug-assertions=no
+
+// This test ensures that in a debug build which turns off debug assertions, we do not monomorphize
+// any of the standard library's unsafe precondition checks.
+// The naive codegen of those checks contains the actual check underneath an `if false`, which
+// could be optimized out if optimizations are enabled. But if we rely on optimizations to remove
+// panic branches, then we can't link compiler_builtins without optimizing it, which means that
+// -Zbuild-std doesn't work with -Copt-level=0.
+//
+// In other words, this tests for a mandatory optimization.
+
+#![crate_type = "lib"]
+
+use std::ptr::NonNull;
+
+// CHECK-LABEL: ; core::ptr::non_null::NonNull<T>::new_unchecked
+// CHECK-NOT: call
+// CHECK: }
+
+// CHECK-LABEL: @nonnull_new
+#[no_mangle]
+pub unsafe fn nonnull_new(ptr: *mut u8) -> NonNull<u8> {
+    // CHECK: ; call core::ptr::non_null::NonNull<T>::new_unchecked
+    unsafe {
+        NonNull::new_unchecked(ptr)
+    }
+}
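For readers unfamiliar with the shape being tested, the snippet below is a loose, self-contained imitation of a checked `new_unchecked`-style constructor. The function and message are invented; the real check lives inside `core` and is gated on a constant that is only resolved at monomorphization time, not on `cfg!(debug_assertions)` as written here, which is exactly why the mono-time analysis in this PR is what lets it disappear at `-Copt-level=0`.

```rust
use std::ptr::NonNull;

/// Illustrative stand-in for a checked `new_unchecked`-style constructor.
/// This is not the actual `core` implementation.
pub unsafe fn new_unchecked_sketch(ptr: *mut u8) -> NonNull<u8> {
    if cfg!(debug_assertions) {
        // When debug assertions are off, this arm is dead; the PR ensures the
        // analogous mono-time-constant guard in `core` is recognized, so the
        // dead arm is never lowered to LLVM IR at all.
        assert!(!ptr.is_null(), "new_unchecked requires a non-null pointer");
    }
    // SAFETY: the caller guarantees that `ptr` is non-null.
    unsafe { NonNull::new_unchecked(ptr) }
}
```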
tests/codegen/skip-mono-inside-if-false.rs (new file, 41 lines)
@@ -0,0 +1,41 @@
+//@ compile-flags: -Cno-prepopulate-passes -Copt-level=0
+
+#![crate_type = "lib"]
+
+#[no_mangle]
+pub fn demo_for_i32() {
+    generic_impl::<i32>();
+}
+
+// Two important things here:
+// - We replace the "then" block with `unreachable` to avoid linking problems
+// - We neither declare nor define the `big_impl` that said block "calls".
+
+// CHECK-LABEL: ; skip_mono_inside_if_false::generic_impl
+// CHECK: start:
+// CHECK-NEXT: br label %[[ELSE_BRANCH:bb[0-9]+]]
+// CHECK: [[ELSE_BRANCH]]:
+// CHECK-NEXT: call skip_mono_inside_if_false::small_impl
+// CHECK: bb{{[0-9]+}}:
+// CHECK-NEXT: ret void
+// CHECK: bb{{[0-9+]}}:
+// CHECK-NEXT: unreachable
+
+fn generic_impl<T>() {
+    trait MagicTrait {
+        const IS_BIG: bool;
+    }
+    impl<T> MagicTrait for T {
+        const IS_BIG: bool = std::mem::size_of::<T>() > 10;
+    }
+    if T::IS_BIG {
+        big_impl::<T>();
+    } else {
+        small_impl::<T>();
+    }
+}
+
+#[inline(never)]
+fn small_impl<T>() {}
+#[inline(never)]
+fn big_impl<T>() {}
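As a usage note (assuming the usual rust-lang/rust compiletest workflow rather than anything stated in this PR), these FileCheck-based codegen tests can be run individually by path, for example `./x.py test tests/codegen/skip-mono-inside-if-false.rs`. Both tests deliberately pass `-Cno-prepopulate-passes -Copt-level=0` so that the checked IR reflects what the monomorphization-time analysis removed rather than what LLVM optimized away.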