rust/compiler/rustc_codegen_cranelift/src/abi/mod.rs

//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
mod comments;
mod pass_mode;
mod returning;
use std::borrow::Cow;
use std::mem;
use cranelift_codegen::ir::{ArgumentPurpose, SigRef};
use cranelift_codegen::isa::CallConv;
use cranelift_module::ModuleError;
use rustc_abi::ExternAbi;
use rustc_codegen_ssa::base::is_call_from_compiler_builtins_to_upstream_monomorphization;
use rustc_codegen_ssa::errors::CompilerBuiltinsCannotCall;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::TypeVisitableExt;
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_session::Session;
use rustc_span::source_map::Spanned;
use rustc_target::callconv::{Conv, FnAbi, PassMode};
use self::pass_mode::*;
pub(crate) use self::returning::codegen_return;
use crate::prelude::*;
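/// Construct a Cranelift [`Signature`] from a rustc [`FnAbi`].
///
/// As a rough illustration (assuming a 64-bit target and the default call
/// conv, not any specific platform): `fn(u64, u64) -> u64` lowers to a
/// signature with params `[i64, i64]` and returns `[i64]`, while a function
/// returning a large aggregate instead gets an extra leading parameter
/// pointing at the return place.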
fn clif_sig_from_fn_abi<'tcx>(
tcx: TyCtxt<'tcx>,
default_call_conv: CallConv,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) -> Signature {
let call_conv = conv_to_call_conv(tcx.sess, fn_abi.conv, default_call_conv);
let inputs = fn_abi.args.iter().flat_map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter());
let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
// Sometimes the first param is a pointer to the place where the return value needs to be stored.
let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
Signature { params, returns, call_conv }
}
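/// Map a rustc calling convention to the closest Cranelift [`CallConv`].
/// Note that `Rust` and `C` both use the target's default call conv, and that
/// `Cold`, `PreserveMost` and `PreserveAll` are all approximated by
/// Cranelift's `Cold` call conv.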
pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: CallConv) -> CallConv {
match c {
Conv::Rust | Conv::C => default_call_conv,
Conv::Cold | Conv::PreserveMost | Conv::PreserveAll => CallConv::Cold,
Conv::X86_64SysV => CallConv::SystemV,
Conv::X86_64Win64 => CallConv::WindowsFastcall,
// These should already have triggered a backward compatibility warning.
Conv::X86Fastcall | Conv::X86Stdcall | Conv::X86ThisCall | Conv::X86VectorCall => {
default_call_conv
}
Conv::X86Intr | Conv::RiscvInterrupt { .. } => {
sess.dcx().fatal(format!("interrupt call conv {c:?} not yet implemented"))
}
Conv::ArmAapcs => sess.dcx().fatal("aapcs call conv not yet implemented"),
Conv::CCmseNonSecureCall => {
sess.dcx().fatal("C-cmse-nonsecure-call call conv is not yet implemented");
}
Conv::CCmseNonSecureEntry => {
sess.dcx().fatal("C-cmse-nonsecure-entry call conv is not yet implemented");
}
Conv::Msp430Intr | Conv::GpuKernel | Conv::AvrInterrupt | Conv::AvrNonBlockingInterrupt => {
unreachable!("tried to use {c:?} call conv which only exists on an unsupported target");
}
}
}
pub(crate) fn get_function_sig<'tcx>(
tcx: TyCtxt<'tcx>,
default_call_conv: CallConv,
inst: Instance<'tcx>,
) -> Signature {
assert!(!inst.args.has_infer());
clif_sig_from_fn_abi(
tcx,
default_call_conv,
&FullyMonomorphizedLayoutCx(tcx).fn_abi_of_instance(inst, ty::List::empty()),
)
}
/// Instance must be monomorphized
pub(crate) fn import_function<'tcx>(
tcx: TyCtxt<'tcx>,
module: &mut dyn Module,
inst: Instance<'tcx>,
) -> FuncId {
let name = tcx.symbol_name(inst).name;
let sig = get_function_sig(tcx, module.target_config().default_call_conv, inst);
match module.declare_function(name, Linkage::Import, &sig) {
Ok(func_id) => func_id,
Err(ModuleError::IncompatibleDeclaration(_)) => tcx.dcx().fatal(format!(
"attempt to declare `{name}` as function, but it was already declared as static"
)),
Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.dcx().fatal(format!(
"attempt to declare `{name}` with signature {new_sig:?}, \
but it was already declared with signature {prev_sig:?}"
)),
Err(err) => Err::<_, _>(err).unwrap(),
}
}
impl<'tcx> FunctionCx<'_, '_, 'tcx> {
/// Instance must be monomorphized
pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
let func_id = import_function(self.tcx, self.module, inst);
let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
if self.clif_comments.enabled() {
self.add_comment(func_ref, format!("{:?}", inst));
}
func_ref
}
pub(crate) fn lib_call(
&mut self,
name: &str,
params: Vec<AbiParam>,
mut returns: Vec<AbiParam>,
args: &[Value],
) -> Cow<'_, [Value]> {
// Pass i128 arguments by-ref on Windows.
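// (The Windows x64 calling convention has no 128-bit integer argument class;
// arguments wider than 64 bits are passed indirectly through a pointer.)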
let (params, args): (Vec<_>, Cow<'_, [_]>) = if self.tcx.sess.target.is_like_windows {
let (params, args): (Vec<_>, Vec<_>) = params
.into_iter()
.zip(args)
.map(|(param, &arg)| {
if param.value_type == types::I128 {
let arg_ptr = self.create_stack_slot(16, 16);
arg_ptr.store(self, arg, MemFlags::trusted());
(AbiParam::new(self.pointer_type), arg_ptr.get_addr(self))
} else {
(param, arg)
}
})
.unzip();
(params, args.into())
} else {
(params, args.into())
};
let ret_single_i128 = returns.len() == 1 && returns[0].value_type == types::I128;
if ret_single_i128 && self.tcx.sess.target.is_like_windows {
// Return i128 using the vector ABI on Windows
returns[0].value_type = types::I64X2;
let ret = self.lib_call_unadjusted(name, params, returns, &args)[0];
// FIXME(bytecodealliance/wasmtime#6104) use bitcast instead of store to get from i64x2 to i128
let ret_ptr = self.create_stack_slot(16, 16);
ret_ptr.store(self, ret, MemFlags::trusted());
Cow::Owned(vec![ret_ptr.load(self, types::I128, MemFlags::trusted())])
} else if ret_single_i128 && self.tcx.sess.target.arch == "s390x" {
// Return i128 using a return area pointer on s390x.
let mut params = params;
let mut args = args.to_vec();
params.insert(0, AbiParam::new(self.pointer_type));
let ret_ptr = self.create_stack_slot(16, 16);
args.insert(0, ret_ptr.get_addr(self));
self.lib_call_unadjusted(name, params, vec![], &args);
Cow::Owned(vec![ret_ptr.load(self, types::I128, MemFlags::trusted())])
} else {
Cow::Borrowed(self.lib_call_unadjusted(name, params, returns, &args))
}
}
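/// Declare and call the named libcall with the given signature as-is, without
/// the i128 adjustments applied by [`Self::lib_call`].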
fn lib_call_unadjusted(
&mut self,
name: &str,
params: Vec<AbiParam>,
returns: Vec<AbiParam>,
args: &[Value],
) -> &[Value] {
let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
if self.clif_comments.enabled() {
self.add_comment(func_ref, format!("{:?}", name));
}
let call_inst = self.bcx.ins().call(func_ref, args);
if self.clif_comments.enabled() {
self.add_comment(call_inst, format!("lib_call {}", name));
}
let results = self.bcx.inst_results(call_inst);
assert!(results.len() <= 2, "{}", results.len());
results
}
}
/// Make a [`CPlace`] capable of holding a value of the specified type.
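///
/// SSA-eligible locals become Cranelift variables (a pair of variables for
/// scalar pairs); all other locals are backed by a stack slot.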
fn make_local_place<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
is_ssa: bool,
) -> CPlace<'tcx> {
if layout.is_unsized() {
fx.tcx.dcx().span_fatal(
fx.mir.local_decls[local].source_info.span,
"unsized locals are not yet supported",
);
}
let place = if is_ssa {
if let BackendRepr::ScalarPair(_, _) = layout.backend_repr {
CPlace::new_var_pair(fx, local, layout)
} else {
CPlace::new_var(fx, local, layout)
}
} else {
CPlace::new_stack_slot(fx, layout)
};
self::comments::add_local_place_comments(fx, place, local);
place
}
pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
fx.bcx.append_block_params_for_function_params(start_block);
fx.bcx.switch_to_block(start_block);
fx.bcx.ins().nop();
let ssa_analyzed = crate::analyze::analyze(fx);
self::comments::add_args_header_comment(fx);
let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
let ret_place =
self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
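// Locals are pushed into `fx.local_map` in declaration order, so the return
// place ends up as local `_0` (`RETURN_PLACE`).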
// `None` means the argument is ignored at the ABI level (`PassMode::Ignore`).
enum ArgKind<'tcx> {
Normal(Option<CValue<'tcx>>),
Spread(Vec<Option<CValue<'tcx>>>),
}
// FIXME implement variadics in cranelift
if fx.fn_abi.c_variadic {
fx.tcx.dcx().span_fatal(
fx.mir.span,
"Defining variadic functions is not yet supported by Cranelift",
);
}
let mut arg_abis_iter = fx.fn_abi.args.iter();
let func_params = fx
.mir
.args_iter()
.map(|local| {
let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
// Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
if Some(local) == fx.mir.spread_arg {
// This argument (e.g. the last argument in the "rust-call" ABI)
// is a tuple that was spread at the ABI level and now we have
// to reconstruct it into a tuple local variable, from multiple
// individual function arguments.
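// For example, for `extern "rust-call" fn call_once(self, args: (A, B))` the
// ABI-level arguments are `self`, `A` and `B`; the `(A, B)` tuple local is
// rebuilt here from the latter two.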
let tupled_arg_tys = match arg_ty.kind() {
ty::Tuple(ref tys) => tys,
_ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
};
let mut params = Vec::new();
for (i, _arg_ty) in tupled_arg_tys.iter().enumerate() {
let arg_abi = arg_abis_iter.next().unwrap();
let param =
cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
params.push(param);
}
(local, ArgKind::Spread(params), arg_ty)
} else {
let arg_abi = arg_abis_iter.next().unwrap();
let param =
cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
(local, ArgKind::Normal(param), arg_ty)
}
})
.collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
assert!(fx.caller_location.is_none());
if fx.instance.def.requires_caller_location(fx.tcx) {
// Store caller location for `#[track_caller]`.
let arg_abi = arg_abis_iter.next().unwrap();
fx.caller_location =
Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
}
assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
assert!(block_params_iter.next().is_none(), "arg_value left behind");
self::comments::add_locals_header_comment(fx);
for (local, arg_kind, ty) in func_params {
// While this is normally an optimization to prevent an unnecessary copy when an argument is
// not mutated by the current function, this is necessary to support unsized arguments.
if let ArgKind::Normal(Some(val)) = arg_kind {
if let Some((addr, meta)) = val.try_to_ptr() {
// Ownership of the value at the backing storage for an argument is passed to the
// callee per the ABI, so it is fine to borrow the backing storage of this argument
// to prevent a copy.
let place = if let Some(meta) = meta {
CPlace::for_ptr_with_extra(addr, meta, val.layout())
} else {
CPlace::for_ptr(addr, val.layout())
};
self::comments::add_local_place_comments(fx, place, local);
assert_eq!(fx.local_map.push(place), local);
continue;
}
}
let layout = fx.layout_of(ty);
let is_ssa = ssa_analyzed[local].is_ssa(fx, ty);
let place = make_local_place(fx, local, layout, is_ssa);
assert_eq!(fx.local_map.push(place), local);
match arg_kind {
ArgKind::Normal(param) => {
if let Some(param) = param {
place.write_cvalue(fx, param);
}
}
ArgKind::Spread(params) => {
for (i, param) in params.into_iter().enumerate() {
if let Some(param) = param {
place.place_field(fx, FieldIdx::new(i)).write_cvalue(fx, param);
}
}
}
}
}
for local in fx.mir.vars_and_temps_iter() {
let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
let layout = fx.layout_of(ty);
let is_ssa = ssa_analyzed[local].is_ssa(fx, ty);
let place = make_local_place(fx, local, layout, is_ssa);
assert_eq!(fx.local_map.push(place), local);
}
fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}
struct CallArgument<'tcx> {
value: CValue<'tcx>,
is_owned: bool,
}
// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
fn codegen_call_argument_operand<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
operand: &Operand<'tcx>,
) -> CallArgument<'tcx> {
CallArgument {
value: codegen_operand(fx, operand),
is_owned: matches!(operand, Operand::Move(_)),
}
}
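/// Codegen a function call terminator. Intrinsics and empty drop glue are
/// handled specially; everything else becomes either a direct call, a virtual
/// call through the vtable, or an indirect call through a function pointer.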
pub(crate) fn codegen_terminator_call<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
source_info: mir::SourceInfo,
func: &Operand<'tcx>,
args: &[Spanned<Operand<'tcx>>],
destination: Place<'tcx>,
target: Option<BasicBlock>,
) {
let func = codegen_operand(fx, func);
let fn_sig = func.layout().ty.fn_sig(fx.tcx);
let ret_place = codegen_place(fx, destination);
// Handle special calls like intrinsics and empty drop glue.
let instance = if let ty::FnDef(def_id, fn_args) = *func.layout().ty.kind() {
let instance = ty::Instance::expect_resolve(
fx.tcx,
ty::TypingEnv::fully_monomorphized(),
def_id,
fn_args,
source_info.span,
);
if is_call_from_compiler_builtins_to_upstream_monomorphization(fx.tcx, instance) {
if target.is_some() {
let caller_def = fx.instance.def_id();
let e = CompilerBuiltinsCannotCall {
span: fx.tcx.def_span(caller_def),
caller: with_no_trimmed_paths!(fx.tcx.def_path_str(caller_def)),
callee: with_no_trimmed_paths!(fx.tcx.def_path_str(def_id)),
};
fx.tcx.dcx().emit_err(e);
} else {
fx.bcx.ins().trap(TrapCode::user(2).unwrap());
return;
}
}
if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
crate::intrinsics::codegen_llvm_intrinsic_call(
fx,
&fx.tcx.symbol_name(instance).name,
args,
ret_place,
target,
source_info.span,
);
return;
}
match instance.def {
InstanceKind::Intrinsic(_) => {
match crate::intrinsics::codegen_intrinsic_call(
fx,
instance,
args,
ret_place,
target,
source_info,
) {
Ok(()) => return,
Err(instance) => Some(instance),
}
}
InstanceKind::DropGlue(_, None) | ty::InstanceKind::AsyncDropGlueCtorShim(_, None) => {
// empty drop glue - a nop.
let dest = target.expect("Non terminating drop_in_place_real???");
let ret_block = fx.get_block(dest);
fx.bcx.ins().jump(ret_block, &[]);
return;
}
_ => Some(instance),
}
} else {
None
};
let extra_args = &args[fn_sig.inputs().skip_binder().len()..];
let extra_args = fx.tcx.mk_type_list_from_iter(
extra_args.iter().map(|op_arg| fx.monomorphize(op_arg.node.ty(fx.mir, fx.tcx))),
);
let fn_abi = if let Some(instance) = instance {
FullyMonomorphizedLayoutCx(fx.tcx).fn_abi_of_instance(instance, extra_args)
} else {
FullyMonomorphizedLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_sig, extra_args)
};
let is_cold = if fn_sig.abi() == ExternAbi::RustCold {
true
} else {
instance.is_some_and(|inst| {
fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
})
};
if is_cold {
fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
if let Some(destination_block) = target {
fx.bcx.set_cold_block(fx.get_block(destination_block));
}
}
// Unpack arguments tuple for closures
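// For example, a call through `FnMut::call_mut(&mut self, args: (A, B))`
// passes `self` plus an `(A, B)` tuple; the tuple is flattened into separate
// ABI-level arguments below.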
let mut args = if fn_sig.abi() == ExternAbi::RustCall {
let (self_arg, pack_arg) = match args {
[pack_arg] => (None, codegen_call_argument_operand(fx, &pack_arg.node)),
[self_arg, pack_arg] => (
Some(codegen_call_argument_operand(fx, &self_arg.node)),
codegen_call_argument_operand(fx, &pack_arg.node),
),
_ => panic!("rust-call abi requires one or two arguments"),
};
let tupled_arguments = match pack_arg.value.layout().ty.kind() {
ty::Tuple(ref tupled_arguments) => tupled_arguments,
_ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
};
let mut args = Vec::with_capacity(1 + tupled_arguments.len());
args.extend(self_arg);
for i in 0..tupled_arguments.len() {
args.push(CallArgument {
value: pack_arg.value.value_field(fx, FieldIdx::new(i)),
is_owned: pack_arg.is_owned,
});
}
args
} else {
args.iter().map(|arg| codegen_call_argument_operand(fx, &arg.node)).collect::<Vec<_>>()
};
// Pass the caller location for `#[track_caller]`.
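// It is appended after all explicit arguments, matching where
// `codegen_fn_prelude` reads it back on the callee side.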
if instance.is_some_and(|inst| inst.def.requires_caller_location(fx.tcx)) {
let caller_location = fx.get_caller_location(source_info);
args.push(CallArgument { value: caller_location, is_owned: false });
}
let args = args;
assert_eq!(fn_abi.args.len(), args.len());
#[derive(Copy, Clone)]
enum CallTarget {
Direct(FuncRef),
Indirect(SigRef, Value),
}
let (func_ref, first_arg_override) = match instance {
// Trait object call
Some(Instance { def: InstanceKind::Virtual(_, idx), .. }) => {
if fx.clif_comments.enabled() {
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(
nop_inst,
with_no_trimmed_paths!(format!(
"virtual call; self arg pass mode: {:?}",
fn_abi.args[0]
)),
);
}
let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
let sig = fx.bcx.import_signature(sig);
(CallTarget::Indirect(sig, method), Some(ptr.get_addr(fx)))
}
// Normal call
Some(instance) => {
let func_ref = fx.get_function_ref(instance);
(CallTarget::Direct(func_ref), None)
}
// Indirect call
None => {
if fx.clif_comments.enabled() {
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(nop_inst, "indirect call");
}
let func = func.load_scalar(fx);
let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
let sig = fx.bcx.import_signature(sig);
(CallTarget::Indirect(sig, func), None)
}
};
self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
let mut call_args = return_ptr
.into_iter()
.chain(first_arg_override.into_iter())
.chain(
args.into_iter()
.enumerate()
.skip(if first_arg_override.is_some() { 1 } else { 0 })
.flat_map(|(i, arg)| {
adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned).into_iter()
}),
)
.collect::<Vec<Value>>();
// FIXME: Find a cleaner way to support varargs.
if fn_abi.c_variadic {
adjust_call_for_c_variadic(fx, &fn_abi, source_info, func_ref, &mut call_args);
}
if fx.clif_comments.enabled() {
let nop_inst = fx.bcx.ins().nop();
with_no_trimmed_paths!(fx.add_comment(nop_inst, format!("abi: {:?}", fn_abi)));
}
match func_ref {
CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
CallTarget::Indirect(sig, func_ptr) => {
fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
}
}
});
if let Some(dest) = target {
let ret_block = fx.get_block(dest);
fx.bcx.ins().jump(ret_block, &[]);
} else {
fx.bcx.ins().trap(TrapCode::user(1 /* unreachable */).unwrap());
}
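// Rewrite the Cranelift signature of a C variadic call so that it also
// covers the variadic arguments, applying any target-specific fix-ups
// (such as the Apple AArch64 register padding below).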
fn adjust_call_for_c_variadic<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
source_info: mir::SourceInfo,
target: CallTarget,
call_args: &mut Vec<Value>,
) {
if fn_abi.conv != Conv::C {
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Variadic call for non-C abi {:?}", fn_abi.conv),
);
}
let sig_ref = match target {
CallTarget::Direct(func_ref) => fx.bcx.func.dfg.ext_funcs[func_ref].signature,
CallTarget::Indirect(sig_ref, _) => sig_ref,
};
// `mem::take()` the `params` so that `fx.bcx` can be used below.
let mut abi_params = mem::take(&mut fx.bcx.func.dfg.signatures[sig_ref].params);
// Recalculate the parameters in the signature to ensure the signature contains the variadic arguments.
let has_return_arg = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
// Drop everything except the return argument (if there is one).
abi_params.truncate(if has_return_arg { 1 } else { 0 });
// Add the fixed arguments.
abi_params.extend(
fn_abi.args[..fn_abi.fixed_count as usize]
.iter()
.flat_map(|arg_abi| arg_abi.get_abi_param(fx.tcx).into_iter()),
);
let fixed_arg_count = abi_params.len();
// Add the variadic arguments.
abi_params.extend(
fn_abi.args[fn_abi.fixed_count as usize..]
.iter()
.flat_map(|arg_abi| arg_abi.get_abi_param(fx.tcx).into_iter()),
);
if fx.tcx.sess.target.is_like_osx && fx.tcx.sess.target.arch == "aarch64" {
// Add any padding arguments needed for Apple AArch64.
// There's no need to pad the argument list unless variadic arguments are actually being
// passed.
if abi_params.len() > fixed_arg_count {
// 128-bit integers take 2 registers, and everything else takes 1.
// FIXME: Add support for non-integer types
// This relies on the checks below to ensure all arguments are integer types and
// that the ABI is "C".
// The return argument isn't counted as it goes in its own dedicated register.
let integer_registers_used: usize = abi_params
[if has_return_arg { 1 } else { 0 }..fixed_arg_count]
.iter()
.map(|arg| if arg.value_type.bits() == 128 { 2 } else { 1 })
.sum();
// The ABI uses 8 registers before it starts pushing arguments to the stack. Pad out
// the registers if needed to ensure the variadic arguments are passed on the stack.
if integer_registers_used < 8 {
abi_params.splice(
fixed_arg_count..fixed_arg_count,
(integer_registers_used..8).map(|_| AbiParam::new(types::I64)),
);
call_args.splice(
fixed_arg_count..fixed_arg_count,
(integer_registers_used..8).map(|_| fx.bcx.ins().iconst(types::I64, 0)),
);
}
}
// `StructArgument` is not currently used by the `aarch64` ABI, and is therefore not
// handled when calculating how many padding arguments to use. Assert that this remains
// the case.
assert!(abi_params.iter().all(|param| matches!(
param.purpose,
// The only purposes used are `Normal` and `StructReturn`.
ArgumentPurpose::Normal | ArgumentPurpose::StructReturn
)));
}
// Check all parameters are integers.
for param in abi_params.iter() {
if !param.value_type.is_int() {
// FIXME: Set %al to upperbound on float args once floats are supported.
fx.tcx.dcx().span_fatal(
source_info.span,
format!("Non int ty {:?} for variadic call", param.value_type),
);
}
}
assert_eq!(abi_params.len(), call_args.len());
// Put the `AbiParam`s back in the signature.
fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
}
}
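/// Codegen the drop glue call for `drop_place`, handling `dyn Trait` and
/// `dyn* Trait` values via an indirect call through their vtable.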
pub(crate) fn codegen_drop<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
source_info: mir::SourceInfo,
drop_place: CPlace<'tcx>,
target: BasicBlock,
) {
let ty = drop_place.layout().ty;
let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty);
if let ty::InstanceKind::DropGlue(_, None) | ty::InstanceKind::AsyncDropGlueCtorShim(_, None) =
drop_instance.def
{
// we don't actually need to drop anything
} else {
match ty.kind() {
ty::Dynamic(_, _, ty::Dyn) => {
// IN THIS ARM, WE HAVE:
// ty = *mut (dyn Trait)
// which is: exists<T> ( *mut T, Vtable<T: Trait> )
// args[0] args[1]
//
// args = ( Data, Vtable )
// |
// v
// /-------\
// | ... |
// \-------/
//
let (ptr, vtable) = drop_place.to_ptr_unsized();
let ptr = ptr.get_addr(fx);
let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
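// The vtable's drop entry may be a null pointer when the concrete type has
// no drop glue; in that case branch straight to the target block.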
let is_null = fx.bcx.ins().icmp_imm(IntCC::Equal, drop_fn, 0);
let target_block = fx.get_block(target);
let continued = fx.bcx.create_block();
fx.bcx.ins().brif(is_null, target_block, &[], continued, &[]);
fx.bcx.switch_to_block(continued);
// FIXME(eddyb) perhaps move some of this logic into
// `Instance::resolve_drop_in_place`?
let virtual_drop = Instance {
def: ty::InstanceKind::Virtual(drop_instance.def_id(), 0),
args: drop_instance.args,
};
let fn_abi = FullyMonomorphizedLayoutCx(fx.tcx)
.fn_abi_of_instance(virtual_drop, ty::List::empty());
let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
let sig = fx.bcx.import_signature(sig);
fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
}
ty::Dynamic(_, _, ty::DynStar) => {
// IN THIS ARM, WE HAVE:
// ty = *mut (dyn* Trait)
// which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
//
// args = [ * ]
// |
// v
// ( Data, Vtable )
// |
// v
// /-------\
// | ... |
// \-------/
//
//
// WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
//
// data = &(*args[0]).0 // gives a pointer to Data above (really the same pointer)
// vtable = (*args[0]).1 // loads the vtable out
// (data, vtable) // an equivalent Rust `*mut dyn Trait`
//
// SO THEN WE CAN USE THE ABOVE CODE.
let (data, vtable) = drop_place.to_cvalue(fx).dyn_star_force_data_on_stack(fx);
let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
let is_null = fx.bcx.ins().icmp_imm(IntCC::Equal, drop_fn, 0);
let target_block = fx.get_block(target);
let continued = fx.bcx.create_block();
fx.bcx.ins().brif(is_null, target_block, &[], continued, &[]);
fx.bcx.switch_to_block(continued);
let virtual_drop = Instance {
def: ty::InstanceKind::Virtual(drop_instance.def_id(), 0),
args: drop_instance.args,
};
let fn_abi = FullyMonomorphizedLayoutCx(fx.tcx)
.fn_abi_of_instance(virtual_drop, ty::List::empty());
let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
let sig = fx.bcx.import_signature(sig);
fx.bcx.ins().call_indirect(sig, drop_fn, &[data]);
}
_ => {
assert!(!matches!(drop_instance.def, InstanceKind::Virtual(_, _)));
let fn_abi = FullyMonomorphizedLayoutCx(fx.tcx)
.fn_abi_of_instance(drop_instance, ty::List::empty());
let arg_value = drop_place.place_ref(
fx,
fx.layout_of(Ty::new_mut_ref(fx.tcx, fx.tcx.lifetimes.re_erased, ty)),
);
let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
if drop_instance.def.requires_caller_location(fx.tcx) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(source_info);
call_args.extend(
adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
);
}
let func_ref = fx.get_function_ref(drop_instance);
fx.bcx.ins().call(func_ref, &call_args);
}
}
}
let target_block = fx.get_block(target);
fx.bcx.ins().jump(target_block, &[]);
}