Refactor call terminator to always hold a destination place

Jakob Degen 2022-04-16 09:27:54 -04:00
parent d9025cebac
commit 5011ae46a1
6 changed files with 66 additions and 71 deletions
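What the refactor amounts to: the MIR call terminator used to carry its return place and its return block together in a single `Option`, so a diverging call had neither; it now always carries a `destination` place and keeps only the continuation block optional. A self-contained toy sketch of that shape (simplified stand-in types, not the real `rustc_middle::mir` definitions):

```rust
// Toy stand-ins for the real MIR types; the actual definitions in
// rustc_middle::mir carry many more fields (func, args, cleanup, ...).
#[derive(Clone, Copy, Debug)]
struct Place(u32);
#[derive(Clone, Copy, Debug)]
struct BasicBlock(u32);

// Before: the return place and the return block travelled together,
// so a diverging call had no destination place at all.
#[allow(dead_code)]
enum CallBefore {
    Call { destination: Option<(Place, BasicBlock)> },
}

// After: every call names a place for its return value; only the
// continuation block is optional (None means the call diverges).
enum CallAfter {
    Call { destination: Place, target: Option<BasicBlock> },
}

fn main() {
    // A diverging call still has a destination place after the refactor.
    let call = CallAfter::Call { destination: Place(0), target: None };
    match call {
        CallAfter::Call { destination, target } => {
            println!("writes to {destination:?}, then continues at {target:?}");
        }
    }
}
```

The codegen changes below are mostly mechanical consequences of that split.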

View File

@@ -312,13 +312,14 @@ pub(crate) fn codegen_terminator_call<'tcx>(
     source_info: mir::SourceInfo,
     func: &Operand<'tcx>,
     args: &[Operand<'tcx>],
-    mir_dest: Option<(Place<'tcx>, BasicBlock)>,
+    destination: Place<'tcx>,
+    target: Option<BasicBlock>,
 ) {
     let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
     let fn_sig =
         fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));

-    let destination = mir_dest.map(|(place, bb)| (codegen_place(fx, place), bb));
+    let ret_place = codegen_place(fx, destination);

     // Handle special calls like instrinsics and empty drop glue.
     let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
@@ -333,7 +334,8 @@ pub(crate) fn codegen_terminator_call<'tcx>(
                 &fx.tcx.symbol_name(instance).name,
                 substs,
                 args,
-                destination,
+                ret_place,
+                target,
             );
             return;
         }
@@ -344,14 +346,15 @@ pub(crate) fn codegen_terminator_call<'tcx>(
                 fx,
                 instance,
                 args,
-                destination,
+                ret_place,
+                target,
                 source_info,
             );
             return;
         }
         InstanceDef::DropGlue(_, None) => {
             // empty drop glue - a nop.
-            let (_, dest) = destination.expect("Non terminating drop_in_place_real???");
+            let dest = target.expect("Non terminating drop_in_place_real???");
             let ret_block = fx.get_block(dest);
             fx.bcx.ins().jump(ret_block, &[]);
             return;
@@ -377,7 +380,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
         .unwrap_or(false);
     if is_cold {
         fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
-        if let Some((_place, destination_block)) = destination {
+        if let Some(destination_block) = target {
             fx.bcx.set_cold_block(fx.get_block(destination_block));
         }
     }
@@ -459,7 +462,6 @@ pub(crate) fn codegen_terminator_call<'tcx>(
         }
     };

-    let ret_place = destination.map(|(place, _)| place);
     self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
         let call_args = return_ptr
             .into_iter()
@@ -511,7 +513,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
         call_inst
     });

-    if let Some((_, dest)) = destination {
+    if let Some(dest) = target {
         let ret_block = fx.get_block(dest);
         fx.bcx.ins().jump(ret_block, &[]);
     } else {
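
Taken together, the hunks above switch `codegen_terminator_call` from mapping an `Option<(Place, BasicBlock)>` into codegen'd pieces to lowering the return place unconditionally and branching only on the optional target block. A minimal sketch of that control flow, using made-up stand-ins (`Ctx`, `lower_place`, `lower_call`) rather than the real cranelift-based API:

```rust
// Hypothetical, trimmed-down stand-ins for cg_clif's FunctionCx / CPlace /
// cranelift blocks; none of these are the real APIs.
struct Ctx;
#[derive(Clone, Copy, Debug)]
struct Place(u32);
#[derive(Clone, Copy, Debug)]
struct CPlace(u32);
#[derive(Clone, Copy, Debug)]
struct BasicBlock(u32);

// Stand-in for codegen_place: lower a MIR place to a codegen place.
fn lower_place(_cx: &mut Ctx, place: Place) -> CPlace {
    CPlace(place.0)
}

// After the refactor the return place is lowered unconditionally; only the
// "where do we continue" question still depends on an Option.
fn lower_call(cx: &mut Ctx, destination: Place, target: Option<BasicBlock>) {
    let ret_place = lower_place(cx, destination); // always present now
    println!("emit call, result written into {ret_place:?}");
    match target {
        Some(bb) => println!("jump to {bb:?}"),      // normal call
        None => println!("diverging call: no jump"), // no successor block
    }
}

fn main() {
    let mut cx = Ctx;
    lower_call(&mut cx, Place(0), Some(BasicBlock(1)));
    lower_call(&mut cx, Place(1), None);
}
```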

View File

@@ -56,23 +56,22 @@ pub(super) fn codegen_return_param<'tcx>(
 pub(super) fn codegen_with_call_return_arg<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
-    ret_place: Option<CPlace<'tcx>>,
+    ret_place: CPlace<'tcx>,
     f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> Inst,
 ) {
     let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
         PassMode::Ignore => (None, None),
-        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => match ret_place {
-            Some(ret_place) if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) => {
+        PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+            if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) {
                 // This is an optimization to prevent unnecessary copies of the return value when
                 // the return place is already a memory place as opposed to a register.
                 // This match arm can be safely removed.
                 (None, Some(ret_place.to_ptr().get_addr(fx)))
-            }
-            _ => {
+            } else {
                 let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
                 (Some(place), Some(place.to_ptr().get_addr(fx)))
             }
-        },
+        }
         PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
             unreachable!("unsized return value")
         }
@@ -84,39 +83,25 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
     match ret_arg_abi.mode {
         PassMode::Ignore => {}
         PassMode::Direct(_) => {
-            if let Some(ret_place) = ret_place {
-                let ret_val = fx.bcx.inst_results(call_inst)[0];
-                ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
-            }
+            let ret_val = fx.bcx.inst_results(call_inst)[0];
+            ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
         }
         PassMode::Pair(_, _) => {
-            if let Some(ret_place) = ret_place {
-                let ret_val_a = fx.bcx.inst_results(call_inst)[0];
-                let ret_val_b = fx.bcx.inst_results(call_inst)[1];
-                ret_place.write_cvalue(
-                    fx,
-                    CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout),
-                );
-            }
+            let ret_val_a = fx.bcx.inst_results(call_inst)[0];
+            let ret_val_b = fx.bcx.inst_results(call_inst)[1];
+            ret_place
+                .write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
         }
         PassMode::Cast(cast) => {
-            if let Some(ret_place) = ret_place {
-                let results = fx
-                    .bcx
-                    .inst_results(call_inst)
-                    .iter()
-                    .copied()
-                    .collect::<SmallVec<[Value; 2]>>();
-                let result =
-                    super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
-                ret_place.write_cvalue(fx, result);
-            }
+            let results =
+                fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
+            let result =
+                super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
+            ret_place.write_cvalue(fx, result);
         }
         PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
-            if let (Some(ret_place), Some(ret_temp_place)) = (ret_place, ret_temp_place) {
-                // Both ret_place and ret_temp_place must be Some. If ret_place is None, this is
-                // a non-returning call. If ret_temp_place is None, it is not necessary to copy the
-                // return value.
+            if let Some(ret_temp_place) = ret_temp_place {
+                // If ret_temp_place is None, it is not necessary to copy the return value.
                 let ret_temp_value = ret_temp_place.to_cvalue(fx);
                 ret_place.write_cvalue(fx, ret_temp_value);
             }
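
With `ret_place` no longer an `Option`, each `PassMode` arm in `codegen_with_call_return_arg` can write the call's result directly instead of first unwrapping the place. A rough before/after sketch with toy types (not the real `CPlace`/`CValue` API):

```rust
#[derive(Clone, Copy)]
struct CPlace(u32);
#[derive(Clone, Copy)]
struct CValue(i64);

impl CPlace {
    fn write_cvalue(&self, val: CValue) {
        println!("store {} into place {}", val.0, self.0);
    }
}

// Before: every PassMode arm had to unwrap the optional return place first.
fn store_direct_before(ret_place: Option<CPlace>, ret_val: CValue) {
    if let Some(ret_place) = ret_place {
        ret_place.write_cvalue(ret_val);
    }
}

// After: the place is always there, so the guard (and one level of nesting
// per arm) disappears.
fn store_direct_after(ret_place: CPlace, ret_val: CValue) {
    ret_place.write_cvalue(ret_val);
}

fn main() {
    store_direct_before(Some(CPlace(0)), CValue(42));
    store_direct_after(CPlace(0), CValue(42));
}
```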

View File

@@ -393,6 +393,7 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
             func,
             args,
             destination,
+            target,
             fn_span,
             cleanup: _,
             from_hir_call: _,
@@ -404,6 +405,7 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
                     func,
                     args,
                     *destination,
+                    *target,
                 )
             });
         }

View File

@@ -542,8 +542,8 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
                 | TerminatorKind::FalseEdge { .. }
                 | TerminatorKind::FalseUnwind { .. } => unreachable!(),
                 TerminatorKind::InlineAsm { .. } => return None,
-                TerminatorKind::Call { destination: Some((call_place, _)), .. }
-                    if call_place == place =>
+                TerminatorKind::Call { destination, target: Some(_), .. }
+                    if destination == place =>
                 {
                     return None;
                 }

View File

@@ -10,10 +10,9 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
     intrinsic: &str,
     _substs: SubstsRef<'tcx>,
     args: &[mir::Operand<'tcx>],
-    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+    ret: CPlace<'tcx>,
+    target: Option<BasicBlock>,
 ) {
-    let ret = destination.unwrap().0;
-
     intrinsic_match! {
         fx, intrinsic, args,
         _ => {
@@ -126,7 +125,7 @@ pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
         };
     }

-    let dest = destination.expect("all llvm intrinsics used by stdlib should return").1;
+    let dest = target.expect("all llvm intrinsics used by stdlib should return");
     let ret_block = fx.get_block(dest);
     fx.bcx.ins().jump(ret_block, &[]);
 }

View File

@@ -217,15 +217,16 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     instance: Instance<'tcx>,
     args: &[mir::Operand<'tcx>],
-    destination: Option<(CPlace<'tcx>, BasicBlock)>,
+    destination: CPlace<'tcx>,
+    target: Option<BasicBlock>,
     source_info: mir::SourceInfo,
 ) {
     let intrinsic = fx.tcx.item_name(instance.def_id());
     let substs = instance.substs;

-    let ret = match destination {
-        Some((place, _)) => place,
-        None => {
+    let target = if let Some(target) = target {
+        target
+    } else {
             // Insert non returning intrinsics here
             match intrinsic {
                 sym::abort => {
@@ -237,15 +238,21 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
                 _ => unimplemented!("unsupported instrinsic {}", intrinsic),
             }
             return;
-        }
     };

     if intrinsic.as_str().starts_with("simd_") {
-        self::simd::codegen_simd_intrinsic_call(fx, intrinsic, substs, args, ret, source_info.span);
-        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+        self::simd::codegen_simd_intrinsic_call(
+            fx,
+            intrinsic,
+            substs,
+            args,
+            destination,
+            source_info.span,
+        );
+        let ret_block = fx.get_block(target);
         fx.bcx.ins().jump(ret_block, &[]);
-    } else if codegen_float_intrinsic_call(fx, intrinsic, args, ret) {
-        let ret_block = fx.get_block(destination.expect("Float intrinsics don't diverge").1);
+    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
+        let ret_block = fx.get_block(target);
         fx.bcx.ins().jump(ret_block, &[]);
     } else {
         codegen_regular_intrinsic_call(
@@ -254,9 +261,9 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             intrinsic,
             substs,
             args,
-            ret,
-            source_info,
             destination,
+            Some(target),
+            source_info,
         );
     }
 }
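
On the intrinsics side the destination place and the target block now also arrive separately: diverging intrinsics such as `abort` are handled up front where `target` is `None`, and everything after that point can assume a return block exists. A small self-contained sketch of that early-return pattern (placeholder types, string-keyed dispatch instead of the real `sym::*` symbols):

```rust
#[derive(Clone, Copy, Debug)]
struct BasicBlock(u32);

// Hypothetical, trimmed-down dispatcher mirroring the new control flow:
// unwrap the optional target up front, handle diverging intrinsics in the
// `else` branch, and let the rest of the function use a plain BasicBlock.
fn codegen_intrinsic(intrinsic: &str, target: Option<BasicBlock>) {
    let target = if let Some(target) = target {
        target
    } else {
        // Non-returning intrinsics land here; there is nothing to jump to.
        match intrinsic {
            "abort" => println!("emit trap for abort"),
            other => unimplemented!("unsupported diverging intrinsic {other}"),
        }
        return;
    };
    // Returning intrinsics: emit their code, then jump to the target block.
    println!("emit {intrinsic}, then jump to {target:?}");
}

fn main() {
    codegen_intrinsic("simd_add", Some(BasicBlock(3)));
    codegen_intrinsic("abort", None);
}
```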
@@ -339,8 +346,8 @@ fn codegen_regular_intrinsic_call<'tcx>(
     substs: SubstsRef<'tcx>,
     args: &[mir::Operand<'tcx>],
     ret: CPlace<'tcx>,
+    destination: Option<BasicBlock>,
     source_info: mir::SourceInfo,
-    destination: Option<(CPlace<'tcx>, BasicBlock)>,
 ) {
     let usize_layout = fx.layout_of(fx.tcx.types.usize);

@@ -761,7 +768,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                 // special case for compiler-builtins to avoid having to patch it
                 crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
-                let ret_block = fx.get_block(destination.unwrap().1);
+                let ret_block = fx.get_block(destination.unwrap());
                 fx.bcx.ins().jump(ret_block, &[]);
                 return;
             } else {
@@ -789,7 +796,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                 // special case for compiler-builtins to avoid having to patch it
                 crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
-                let ret_block = fx.get_block(destination.unwrap().1);
+                let ret_block = fx.get_block(destination.unwrap());
                 fx.bcx.ins().jump(ret_block, &[]);
                 return;
             } else {
@@ -1130,6 +1137,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
         };
     }

-    let ret_block = fx.get_block(destination.unwrap().1);
+    let ret_block = fx.get_block(destination.unwrap());
     fx.bcx.ins().jump(ret_block, &[]);
 }