fmt: Run cargo fmt since it is available

CohenArthur committed on 2020-08-28 12:10:48 +02:00
parent 5dec38e94c
commit 4e685a512e
40 changed files with 1569 additions and 790 deletions
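
All of the hunks below come from running `cargo fmt` over the crate: rustfmt re-flows code that exceeds its default 100-column line width, sorts `use` statements within a group, and otherwise only adds or removes whitespace and trailing commas, so behaviour is unchanged. As a minimal, hypothetical sketch (the function name and values here are invented for illustration, not taken from the diff), this is the shape of rewrite that repeats throughout the commit: an expression that would be too long on one line is laid out with one element per line.

// Hypothetical, self-contained illustration of the layout rustfmt produces in the
// hunks below: when a tuple or call no longer fits on a single line, each element is
// placed on its own line with a trailing comma. The token stream, and therefore the
// behaviour of the code, is unchanged.
fn storage_comment(addr: usize, offset: isize, meta: &str) -> (&'static str, String) {
    (
        "reuse",
        format!("storage={}{}{}", addr, offset, meta),
    )
}

fn main() {
    let (kind, comment) = storage_comment(16, -8, ",meta");
    println!("{}: {}", kind, comment);
}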

@@ -80,7 +80,10 @@ pub(super) fn add_local_place_comments<'tcx>(
} }
CPlaceInner::VarPair(place_local, var1, var2) => { CPlaceInner::VarPair(place_local, var1, var2) => {
assert_eq!(local, place_local); assert_eq!(local, place_local);
("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index()))) (
"ssa",
Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())),
)
} }
CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(), CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
CPlaceInner::Addr(ptr, meta) => { CPlaceInner::Addr(ptr, meta) => {
@@ -90,15 +93,18 @@ pub(super) fn add_local_place_comments<'tcx>(
Cow::Borrowed("") Cow::Borrowed("")
}; };
match ptr.base_and_offset() { match ptr.base_and_offset() {
(crate::pointer::PointerBase::Addr(addr), offset) => { (crate::pointer::PointerBase::Addr(addr), offset) => (
("reuse", format!("storage={}{}{}", addr, offset, meta).into()) "reuse",
} format!("storage={}{}{}", addr, offset, meta).into(),
(crate::pointer::PointerBase::Stack(stack_slot), offset) => { ),
("stack", format!("storage={}{}{}", stack_slot, offset, meta).into()) (crate::pointer::PointerBase::Stack(stack_slot), offset) => (
} "stack",
(crate::pointer::PointerBase::Dangling(align), offset) => { format!("storage={}{}{}", stack_slot, offset, meta).into(),
("zst", format!("align={},offset={}", align.bytes(), offset).into()) ),
} (crate::pointer::PointerBase::Dangling(align), offset) => (
"zst",
format!("align={},offset={}", align.bytes(), offset).into(),
),
} }
} }
}; };
@@ -111,7 +117,11 @@ pub(super) fn add_local_place_comments<'tcx>(
size.bytes(), size.bytes(),
align.abi.bytes(), align.abi.bytes(),
align.pref.bytes(), align.pref.bytes(),
if extra.is_empty() { "" } else { " " }, if extra.is_empty() {
""
} else {
" "
},
extra, extra,
)); ));
} }

@@ -3,8 +3,8 @@ mod comments;
mod pass_mode; mod pass_mode;
mod returning; mod returning;
use rustc_target::spec::abi::Abi;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_target::spec::abi::Abi;
use cranelift_codegen::ir::{AbiParam, ArgumentPurpose}; use cranelift_codegen::ir::{AbiParam, ArgumentPurpose};
@@ -14,7 +14,10 @@ use crate::prelude::*;
pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return}; pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
// Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301 // Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301
pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> { pub(crate) fn fn_sig_for_fn_abi<'tcx>(
tcx: TyCtxt<'tcx>,
instance: Instance<'tcx>,
) -> ty::PolyFnSig<'tcx> {
use rustc_middle::ty::subst::Subst; use rustc_middle::ty::subst::Subst;
// FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function. // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
@@ -70,10 +73,10 @@ pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx
let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs); let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
sig.map_bound(|sig| { sig.map_bound(|sig| {
let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorStateLangItem, None); let state_did =
tcx.require_lang_item(rustc_hir::LangItem::GeneratorStateLangItem, None);
let state_adt_ref = tcx.adt_def(state_did); let state_adt_ref = tcx.adt_def(state_did);
let state_substs = let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
tcx.mk_fn_sig( tcx.mk_fn_sig(
@@ -102,8 +105,16 @@ fn clif_sig_from_fn_sig<'tcx>(
abi => abi, abi => abi,
}; };
let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi { let (call_conv, inputs, output): (CallConv, Vec<Ty<'tcx>>, Ty<'tcx>) = match abi {
Abi::Rust => (CallConv::triple_default(triple), sig.inputs().to_vec(), sig.output()), Abi::Rust => (
Abi::C | Abi::Unadjusted => (CallConv::triple_default(triple), sig.inputs().to_vec(), sig.output()), CallConv::triple_default(triple),
sig.inputs().to_vec(),
sig.output(),
),
Abi::C | Abi::Unadjusted => (
CallConv::triple_default(triple),
sig.inputs().to_vec(),
sig.output(),
),
Abi::SysV64 => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()), Abi::SysV64 => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
Abi::RustCall => { Abi::RustCall => {
assert_eq!(sig.inputs().len(), 2); assert_eq!(sig.inputs().len(), 2);
@@ -116,7 +127,11 @@ fn clif_sig_from_fn_sig<'tcx>(
(CallConv::triple_default(triple), inputs, sig.output()) (CallConv::triple_default(triple), inputs, sig.output())
} }
Abi::System => unreachable!(), Abi::System => unreachable!(),
Abi::RustIntrinsic => (CallConv::triple_default(triple), sig.inputs().to_vec(), sig.output()), Abi::RustIntrinsic => (
CallConv::triple_default(triple),
sig.inputs().to_vec(),
sig.output(),
),
_ => unimplemented!("unsupported abi {:?}", sig.abi), _ => unimplemented!("unsupported abi {:?}", sig.abi),
}; };
@@ -163,10 +178,7 @@ fn clif_sig_from_fn_sig<'tcx>(
tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(), tcx.layout_of(ParamEnv::reveal_all().and(output)).unwrap(),
) { ) {
PassMode::NoPass => (inputs.collect(), vec![]), PassMode::NoPass => (inputs.collect(), vec![]),
PassMode::ByVal(ret_ty) => ( PassMode::ByVal(ret_ty) => (inputs.collect(), vec![AbiParam::new(ret_ty)]),
inputs.collect(),
vec![AbiParam::new(ret_ty)],
),
PassMode::ByValPair(ret_ty_a, ret_ty_b) => ( PassMode::ByValPair(ret_ty_a, ret_ty_b) => (
inputs.collect(), inputs.collect(),
vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)], vec![AbiParam::new(ret_ty_a), AbiParam::new(ret_ty_b)],
@@ -202,12 +214,24 @@ pub(crate) fn get_function_name_and_sig<'tcx>(
support_vararg: bool, support_vararg: bool,
) -> (String, Signature) { ) -> (String, Signature) {
assert!(!inst.substs.needs_infer()); assert!(!inst.substs.needs_infer());
let fn_sig = let fn_sig = tcx.normalize_erasing_late_bound_regions(
tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_sig_for_fn_abi(tcx, inst)); ParamEnv::reveal_all(),
&fn_sig_for_fn_abi(tcx, inst),
);
if fn_sig.c_variadic && !support_vararg { if fn_sig.c_variadic && !support_vararg {
tcx.sess.span_fatal(tcx.def_span(inst.def_id()), "Variadic function definitions are not yet supported"); tcx.sess.span_fatal(
tcx.def_span(inst.def_id()),
"Variadic function definitions are not yet supported",
);
} }
let sig = clif_sig_from_fn_sig(tcx, triple, fn_sig, tcx.def_span(inst.def_id()), false, inst.def.requires_caller_location(tcx)); let sig = clif_sig_from_fn_sig(
tcx,
triple,
fn_sig,
tcx.def_span(inst.def_id()),
false,
inst.def.requires_caller_location(tcx),
);
(tcx.symbol_name(inst).name.to_string(), sig) (tcx.symbol_name(inst).name.to_string(), sig)
} }
@@ -228,7 +252,8 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef { pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
let func_id = import_function(self.tcx, &mut self.cx.module, inst); let func_id = import_function(self.tcx, &mut self.cx.module, inst);
let func_ref = self let func_ref = self
.cx.module .cx
.module
.declare_func_in_func(func_id, &mut self.bcx.func); .declare_func_in_func(func_id, &mut self.bcx.func);
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
@@ -250,11 +275,13 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
call_conv: CallConv::triple_default(self.triple()), call_conv: CallConv::triple_default(self.triple()),
}; };
let func_id = self let func_id = self
.cx.module .cx
.module
.declare_function(&name, Linkage::Import, &sig) .declare_function(&name, Linkage::Import, &sig)
.unwrap(); .unwrap();
let func_ref = self let func_ref = self
.cx.module .cx
.module
.declare_func_in_func(func_id, &mut self.bcx.func); .declare_func_in_func(func_id, &mut self.bcx.func);
let call_inst = self.bcx.ins().call(func_ref, args); let call_inst = self.bcx.ins().call(func_ref, args);
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
@@ -376,7 +403,9 @@ pub(crate) fn codegen_fn_prelude<'tcx>(
assert!(fx.caller_location.is_none()); assert!(fx.caller_location.is_none());
if fx.instance.def.requires_caller_location(fx.tcx) { if fx.instance.def.requires_caller_location(fx.tcx) {
// Store caller location for `#[track_caller]`. // Store caller location for `#[track_caller]`.
fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap()); fx.caller_location = Some(
cvalue_for_param(fx, start_block, None, None, fx.tcx.caller_location_ty()).unwrap(),
);
} }
fx.bcx.switch_to_block(start_block); fx.bcx.switch_to_block(start_block);
@@ -502,17 +531,20 @@ pub(crate) fn codegen_terminator_call<'tcx>(
fx.bcx.ins().jump(ret_block, &[]); fx.bcx.ins().jump(ret_block, &[]);
return; return;
} }
_ => Some(instance) _ => Some(instance),
} }
} else { } else {
None None
}; };
let is_cold = let is_cold = instance
instance.map(|inst| .map(|inst| {
fx.tcx.codegen_fn_attrs(inst.def_id()) fx.tcx
.flags.contains(CodegenFnAttrFlags::COLD)) .codegen_fn_attrs(inst.def_id())
.unwrap_or(false); .flags
.contains(CodegenFnAttrFlags::COLD)
})
.unwrap_or(false);
if is_cold { if is_cold {
fx.cold_blocks.insert(current_block); fx.cold_blocks.insert(current_block);
} }
@@ -524,9 +556,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let pack_arg = trans_operand(fx, &args[1]); let pack_arg = trans_operand(fx, &args[1]);
let tupled_arguments = match pack_arg.layout().ty.kind { let tupled_arguments = match pack_arg.layout().ty.kind {
ty::Tuple(ref tupled_arguments) => { ty::Tuple(ref tupled_arguments) => tupled_arguments,
tupled_arguments
}
_ => bug!("argument to function with \"rust-call\" ABI is not a tuple"), _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
}; };
@@ -582,8 +612,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let nop_inst = fx.bcx.ins().nop(); let nop_inst = fx.bcx.ins().nop();
fx.add_comment(nop_inst, "indirect call"); fx.add_comment(nop_inst, "indirect call");
} }
let func = trans_operand(fx, func) let func = trans_operand(fx, func).load_scalar(fx);
.load_scalar(fx);
( (
Some(func), Some(func),
args.get(0) args.get(0)
@@ -608,7 +637,10 @@ pub(crate) fn codegen_terminator_call<'tcx>(
) )
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) { if instance
.map(|inst| inst.def.requires_caller_location(fx.tcx))
.unwrap_or(false)
{
// Pass the caller location for `#[track_caller]`. // Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span); let caller_location = fx.get_caller_location(span);
call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter()); call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
@@ -637,7 +669,10 @@ pub(crate) fn codegen_terminator_call<'tcx>(
// FIXME find a cleaner way to support varargs // FIXME find a cleaner way to support varargs
if fn_sig.c_variadic { if fn_sig.c_variadic {
if fn_sig.abi != Abi::C { if fn_sig.abi != Abi::C {
fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi)); fx.tcx.sess.span_fatal(
span,
&format!("Variadic call for non-C abi {:?}", fn_sig.abi),
);
} }
let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap(); let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
let abi_params = call_args let abi_params = call_args
@@ -646,7 +681,9 @@ pub(crate) fn codegen_terminator_call<'tcx>(
let ty = fx.bcx.func.dfg.value_type(arg); let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() { if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported // FIXME set %al to upperbound on float args once floats are supported
fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty)); fx.tcx
.sess
.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
} }
AbiParam::new(ty) AbiParam::new(ty)
}) })
@@ -700,13 +737,16 @@ pub(crate) fn codegen_drop<'tcx>(
_ => { _ => {
assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _))); assert!(!matches!(drop_fn.def, InstanceDef::Virtual(_, _)));
let arg_value = drop_place.place_ref(fx, fx.layout_of(fx.tcx.mk_ref( let arg_value = drop_place.place_ref(
&ty::RegionKind::ReErased, fx,
TypeAndMut { fx.layout_of(fx.tcx.mk_ref(
ty, &ty::RegionKind::ReErased,
mutbl: crate::rustc_hir::Mutability::Mut, TypeAndMut {
}, ty,
))); mutbl: crate::rustc_hir::Mutability::Mut,
},
)),
);
let arg_value = adjust_arg_for_abi(fx, arg_value); let arg_value = adjust_arg_for_abi(fx, arg_value);
let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>(); let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();

@@ -83,9 +83,7 @@ pub(super) fn get_pass_mode<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>)
} else { } else {
match &layout.abi { match &layout.abi {
Abi::Uninhabited => PassMode::NoPass, Abi::Uninhabited => PassMode::NoPass,
Abi::Scalar(scalar) => { Abi::Scalar(scalar) => PassMode::ByVal(scalar_to_clif_type(tcx, scalar.clone())),
PassMode::ByVal(scalar_to_clif_type(tcx, scalar.clone()))
}
Abi::ScalarPair(a, b) => { Abi::ScalarPair(a, b) => {
let a = scalar_to_clif_type(tcx, a.clone()); let a = scalar_to_clif_type(tcx, a.clone());
let b = scalar_to_clif_type(tcx, b.clone()); let b = scalar_to_clif_type(tcx, b.clone());
@@ -93,7 +91,9 @@ pub(super) fn get_pass_mode<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>)
// Returning (i128, i128) by-val-pair would take 4 regs, while only 3 are // Returning (i128, i128) by-val-pair would take 4 regs, while only 3 are
// available on x86_64. Cranelift gets confused when too many return params // available on x86_64. Cranelift gets confused when too many return params
// are used. // are used.
PassMode::ByRef { size: Some(layout.size) } PassMode::ByRef {
size: Some(layout.size),
}
} else { } else {
PassMode::ByValPair(a, b) PassMode::ByValPair(a, b)
} }
@@ -104,11 +104,15 @@ pub(super) fn get_pass_mode<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>)
if let Some(vector_ty) = crate::intrinsics::clif_vector_type(tcx, layout) { if let Some(vector_ty) = crate::intrinsics::clif_vector_type(tcx, layout) {
PassMode::ByVal(vector_ty) PassMode::ByVal(vector_ty)
} else { } else {
PassMode::ByRef { size: Some(layout.size) } PassMode::ByRef {
size: Some(layout.size),
}
} }
} }
Abi::Aggregate { sized: true } => PassMode::ByRef { size: Some(layout.size) }, Abi::Aggregate { sized: true } => PassMode::ByRef {
size: Some(layout.size),
},
Abi::Aggregate { sized: false } => PassMode::ByRef { size: None }, Abi::Aggregate { sized: false } => PassMode::ByRef { size: None },
} }
} }
@@ -125,22 +129,18 @@ pub(super) fn adjust_arg_for_abi<'tcx>(
let (a, b) = arg.load_scalar_pair(fx); let (a, b) = arg.load_scalar_pair(fx);
Pair(a, b) Pair(a, b)
} }
PassMode::ByRef { size: _ } => { PassMode::ByRef { size: _ } => match arg.force_stack(fx) {
match arg.force_stack(fx) { (ptr, None) => Single(ptr.get_addr(fx)),
(ptr, None) => Single(ptr.get_addr(fx)), (ptr, Some(meta)) => Pair(ptr.get_addr(fx), meta),
(ptr, Some(meta)) => Pair(ptr.get_addr(fx), meta), },
}
}
} }
} }
pub(super) fn cvalue_for_param<'tcx>( pub(super) fn cvalue_for_param<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>, fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
start_block: Block, start_block: Block,
#[cfg_attr(not(debug_assertions), allow(unused_variables))] #[cfg_attr(not(debug_assertions), allow(unused_variables))] local: Option<mir::Local>,
local: Option<mir::Local>, #[cfg_attr(not(debug_assertions), allow(unused_variables))] local_field: Option<usize>,
#[cfg_attr(not(debug_assertions), allow(unused_variables))]
local_field: Option<usize>,
arg_ty: Ty<'tcx>, arg_ty: Ty<'tcx>,
) -> Option<CValue<'tcx>> { ) -> Option<CValue<'tcx>> {
let layout = fx.layout_of(arg_ty); let layout = fx.layout_of(arg_ty);
@@ -171,7 +171,10 @@ pub(super) fn cvalue_for_param<'tcx>(
let (a, b) = block_params.assert_pair(); let (a, b) = block_params.assert_pair();
Some(CValue::by_val_pair(a, b, layout)) Some(CValue::by_val_pair(a, b, layout))
} }
PassMode::ByRef { size: Some(_) } => Some(CValue::by_ref(Pointer::new(block_params.assert_single()), layout)), PassMode::ByRef { size: Some(_) } => Some(CValue::by_ref(
Pointer::new(block_params.assert_single()),
layout,
)),
PassMode::ByRef { size: None } => { PassMode::ByRef { size: None } => {
let (ptr, meta) = block_params.assert_pair(); let (ptr, meta) = block_params.assert_pair();
Some(CValue::by_ref_unsized(Pointer::new(ptr), meta, layout)) Some(CValue::by_ref_unsized(Pointer::new(ptr), meta, layout))

@@ -5,11 +5,14 @@ fn return_layout<'a, 'tcx>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>) -> TyAnd
fx.layout_of(fx.monomorphize(&fx.mir.local_decls[RETURN_PLACE].ty)) fx.layout_of(fx.monomorphize(&fx.mir.local_decls[RETURN_PLACE].ty))
} }
pub(crate) fn can_return_to_ssa_var<'tcx>(tcx: TyCtxt<'tcx>, dest_layout: TyAndLayout<'tcx>) -> bool { pub(crate) fn can_return_to_ssa_var<'tcx>(
tcx: TyCtxt<'tcx>,
dest_layout: TyAndLayout<'tcx>,
) -> bool {
match get_pass_mode(tcx, dest_layout) { match get_pass_mode(tcx, dest_layout) {
PassMode::NoPass | PassMode::ByVal(_) => true, PassMode::NoPass | PassMode::ByVal(_) => true,
// FIXME Make it possible to return ByValPair and ByRef to an ssa var. // FIXME Make it possible to return ByValPair and ByRef to an ssa var.
PassMode::ByValPair(_, _) | PassMode::ByRef { size: _ } => false PassMode::ByValPair(_, _) | PassMode::ByRef { size: _ } => false,
} }
} }
@@ -35,8 +38,10 @@ pub(super) fn codegen_return_param(
} }
PassMode::ByRef { size: Some(_) } => { PassMode::ByRef { size: Some(_) } => {
let ret_param = fx.bcx.append_block_param(start_block, fx.pointer_type); let ret_param = fx.bcx.append_block_param(start_block, fx.pointer_type);
fx.local_map fx.local_map.insert(
.insert(RETURN_PLACE, CPlace::for_ptr(Pointer::new(ret_param), ret_layout)); RETURN_PLACE,
CPlace::for_ptr(Pointer::new(ret_param), ret_layout),
);
Single(ret_param) Single(ret_param)
} }
@@ -69,7 +74,7 @@ pub(super) fn codegen_with_call_return_arg<'tcx, B: Backend, T>(
let output_pass_mode = get_pass_mode(fx.tcx, ret_layout); let output_pass_mode = get_pass_mode(fx.tcx, ret_layout);
let return_ptr = match output_pass_mode { let return_ptr = match output_pass_mode {
PassMode::NoPass => None, PassMode::NoPass => None,
PassMode::ByRef { size: Some(_)} => match ret_place { PassMode::ByRef { size: Some(_) } => match ret_place {
Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)), Some(ret_place) => Some(ret_place.to_ptr().get_addr(fx)),
None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot None => Some(fx.bcx.ins().iconst(fx.pointer_type, 43)), // FIXME allocate temp stack slot
}, },

@@ -102,11 +102,13 @@ fn codegen_inner(
bcx.seal_all_blocks(); bcx.seal_all_blocks();
bcx.finalize(); bcx.finalize();
} }
module.define_function( module
func_id, .define_function(
&mut ctx, func_id,
&mut cranelift_codegen::binemit::NullTrapSink {}, &mut ctx,
).unwrap(); &mut cranelift_codegen::binemit::NullTrapSink {},
)
.unwrap();
unwind_context.add_function(func_id, &ctx, module.isa()); unwind_context.add_function(func_id, &ctx, module.isa());
} }
} }

@@ -1,7 +1,7 @@
use crate::prelude::*; use crate::prelude::*;
use rustc_middle::mir::StatementKind::*;
use rustc_index::vec::IndexVec; use rustc_index::vec::IndexVec;
use rustc_middle::mir::StatementKind::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SsaKind { pub(crate) enum SsaKind {
@@ -10,20 +10,25 @@ pub(crate) enum SsaKind {
} }
pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Backend>) -> IndexVec<Local, SsaKind> { pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Backend>) -> IndexVec<Local, SsaKind> {
let mut flag_map = fx.mir.local_decls.iter().map(|local_decl| { let mut flag_map = fx
let ty = fx.monomorphize(&local_decl.ty); .mir
if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() { .local_decls
SsaKind::Ssa .iter()
} else { .map(|local_decl| {
SsaKind::NotSsa let ty = fx.monomorphize(&local_decl.ty);
} if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
}).collect::<IndexVec<Local, SsaKind>>(); SsaKind::Ssa
} else {
SsaKind::NotSsa
}
})
.collect::<IndexVec<Local, SsaKind>>();
for bb in fx.mir.basic_blocks().iter() { for bb in fx.mir.basic_blocks().iter() {
for stmt in bb.statements.iter() { for stmt in bb.statements.iter() {
match &stmt.kind { match &stmt.kind {
Assign(place_and_rval) => match &place_and_rval.1 { Assign(place_and_rval) => match &place_and_rval.1 {
Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place)=> { Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
not_ssa(&mut flag_map, place.local) not_ssa(&mut flag_map, place.local)
} }
_ => {} _ => {}
@@ -35,7 +40,8 @@ pub(crate) fn analyze(fx: &FunctionCx<'_, '_, impl Backend>) -> IndexVec<Local,
match &bb.terminator().kind { match &bb.terminator().kind {
TerminatorKind::Call { destination, .. } => { TerminatorKind::Call { destination, .. } => {
if let Some((dest_place, _dest_bb)) = destination { if let Some((dest_place, _dest_bb)) = destination {
let dest_layout = fx.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.tcx).ty)); let dest_layout = fx
.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.tcx).ty));
if !crate::abi::can_return_to_ssa_var(fx.tcx, dest_layout) { if !crate::abi::can_return_to_ssa_var(fx.tcx, dest_layout) {
not_ssa(&mut flag_map, dest_place.local) not_ssa(&mut flag_map, dest_place.local)
} }

@@ -2,9 +2,9 @@ use std::collections::BTreeMap;
use std::fs::File; use std::fs::File;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use rustc_session::Session;
use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder}; use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
use rustc_codegen_ssa::METADATA_FILENAME; use rustc_codegen_ssa::METADATA_FILENAME;
use rustc_session::Session;
use object::{Object, SymbolKind}; use object::{Object, SymbolKind};
@@ -167,30 +167,45 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
entry.read_to_end(&mut data).unwrap(); entry.read_to_end(&mut data).unwrap();
data data
} }
ArchiveEntry::File(file) => { ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
std::fs::read(file).unwrap_or_else(|err| { sess.fatal(&format!(
sess.fatal(&format!("error while reading object file during archive building: {}", err)); "error while reading object file during archive building: {}",
}) err
} ));
}),
}; };
if !self.no_builtin_ranlib { if !self.no_builtin_ranlib {
match object::File::parse(&data) { match object::File::parse(&data) {
Ok(object) => { Ok(object) => {
symbol_table.insert(entry_name.as_bytes().to_vec(), object.symbols().filter_map(|(_index, symbol)| { symbol_table.insert(
if symbol.is_undefined() || symbol.is_local() || symbol.kind() != SymbolKind::Data && symbol.kind() != SymbolKind::Text && symbol.kind() != SymbolKind::Tls { entry_name.as_bytes().to_vec(),
None object
} else { .symbols()
symbol.name().map(|name| name.as_bytes().to_vec()) .filter_map(|(_index, symbol)| {
} if symbol.is_undefined()
}).collect::<Vec<_>>()); || symbol.is_local()
|| symbol.kind() != SymbolKind::Data
&& symbol.kind() != SymbolKind::Text
&& symbol.kind() != SymbolKind::Tls
{
None
} else {
symbol.name().map(|name| name.as_bytes().to_vec())
}
})
.collect::<Vec<_>>(),
);
} }
Err(err) => { Err(err) => {
let err = err.to_string(); let err = err.to_string();
if err == "Unknown file magic" { if err == "Unknown file magic" {
// Not an object file; skip it. // Not an object file; skip it.
} else { } else {
sess.fatal(&format!("error parsing `{}` during archive creation: {}", entry_name, err)); sess.fatal(&format!(
"error parsing `{}` during archive creation: {}",
entry_name, err
));
} }
} }
} }
@@ -200,36 +215,44 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
} }
let mut builder = if self.use_gnu_style_archive { let mut builder = if self.use_gnu_style_archive {
BuilderKind::Gnu(ar::GnuBuilder::new( BuilderKind::Gnu(
File::create(&self.dst).unwrap_or_else(|err| { ar::GnuBuilder::new(
sess.fatal(&format!("error opening destination during archive building: {}", err)); File::create(&self.dst).unwrap_or_else(|err| {
}), sess.fatal(&format!(
entries "error opening destination during archive building: {}",
.iter() err
.map(|(name, _)| name.as_bytes().to_vec()) ));
.collect(), }),
ar::GnuSymbolTableFormat::Size32, entries
symbol_table, .iter()
).unwrap()) .map(|(name, _)| name.as_bytes().to_vec())
.collect(),
ar::GnuSymbolTableFormat::Size32,
symbol_table,
)
.unwrap(),
)
} else { } else {
BuilderKind::Bsd(ar::Builder::new( BuilderKind::Bsd(
File::create(&self.dst).unwrap_or_else(|err| { ar::Builder::new(
sess.fatal(&format!("error opening destination during archive building: {}", err)); File::create(&self.dst).unwrap_or_else(|err| {
}), sess.fatal(&format!(
symbol_table, "error opening destination during archive building: {}",
).unwrap()) err
));
}),
symbol_table,
)
.unwrap(),
)
}; };
// Add all files // Add all files
for (entry_name, data) in entries.into_iter() { for (entry_name, data) in entries.into_iter() {
let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64); let header = ar::Header::new(entry_name.into_bytes(), data.len() as u64);
match builder { match builder {
BuilderKind::Bsd(ref mut builder) => builder BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
.append(&header, &mut &*data) BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
.unwrap(),
BuilderKind::Gnu(ref mut builder) => builder
.append(&header, &mut &*data)
.unwrap(),
} }
} }
@@ -246,7 +269,8 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
.expect("Couldn't run ranlib"); .expect("Couldn't run ranlib");
if !status.success() { if !status.success() {
self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code())); self.sess
.fatal(&format!("Ranlib exited with code {:?}", status.code()));
} }
} }
} }
@@ -263,9 +287,8 @@ impl<'a> ArArchiveBuilder<'a> {
let mut i = 0; let mut i = 0;
while let Some(entry) = archive.next_entry() { while let Some(entry) = archive.next_entry() {
let entry = entry?; let entry = entry?;
let file_name = String::from_utf8(entry.header().identifier().to_vec()).map_err(|err| { let file_name = String::from_utf8(entry.header().identifier().to_vec())
std::io::Error::new(std::io::ErrorKind::InvalidData, err) .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
})?;
if !skip(&file_name) { if !skip(&file_name) {
self.entries.push(( self.entries.push((
file_name, file_name,

@@ -7,7 +7,8 @@ use crate::prelude::*;
#[cfg(feature = "jit")] #[cfg(feature = "jit")]
#[no_mangle] #[no_mangle]
pub static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t = libc::PTHREAD_MUTEX_INITIALIZER; pub static mut __cg_clif_global_atomic_mutex: libc::pthread_mutex_t =
libc::PTHREAD_MUTEX_INITIALIZER;
pub(crate) fn init_global_lock(module: &mut Module<impl Backend>, bcx: &mut FunctionBuilder<'_>) { pub(crate) fn init_global_lock(module: &mut Module<impl Backend>, bcx: &mut FunctionBuilder<'_>) {
if std::env::var("CG_CLIF_JIT").is_ok() { if std::env::var("CG_CLIF_JIT").is_ok() {
@@ -19,28 +20,42 @@ pub(crate) fn init_global_lock(module: &mut Module<impl Backend>, bcx: &mut Func
let mut data_ctx = DataContext::new(); let mut data_ctx = DataContext::new();
data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms. data_ctx.define_zeroinit(1024); // 1024 bytes should be big enough on all platforms.
let atomic_mutex = module.declare_data( let atomic_mutex = module
"__cg_clif_global_atomic_mutex", .declare_data(
Linkage::Export, "__cg_clif_global_atomic_mutex",
true, Linkage::Export,
false, true,
Some(16), false,
).unwrap(); Some(16),
)
.unwrap();
module.define_data(atomic_mutex, &data_ctx).unwrap(); module.define_data(atomic_mutex, &data_ctx).unwrap();
let pthread_mutex_init = module.declare_function("pthread_mutex_init", Linkage::Import, &cranelift_codegen::ir::Signature { let pthread_mutex_init = module
call_conv: module.target_config().default_call_conv, .declare_function(
params: vec![ "pthread_mutex_init",
AbiParam::new(module.target_config().pointer_type() /* *mut pthread_mutex_t */), Linkage::Import,
AbiParam::new(module.target_config().pointer_type() /* *const pthread_mutex_attr_t */), &cranelift_codegen::ir::Signature {
], call_conv: module.target_config().default_call_conv,
returns: vec![AbiParam::new(types::I32 /* c_int */)], params: vec![
}).unwrap(); AbiParam::new(
module.target_config().pointer_type(), /* *mut pthread_mutex_t */
),
AbiParam::new(
module.target_config().pointer_type(), /* *const pthread_mutex_attr_t */
),
],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
},
)
.unwrap();
let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func); let pthread_mutex_init = module.declare_func_in_func(pthread_mutex_init, bcx.func);
let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func); let atomic_mutex = module.declare_data_in_func(atomic_mutex, bcx.func);
let atomic_mutex = bcx.ins().global_value(module.target_config().pointer_type(), atomic_mutex); let atomic_mutex = bcx
.ins()
.global_value(module.target_config().pointer_type(), atomic_mutex);
let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0); let nullptr = bcx.ins().iconst(module.target_config().pointer_type(), 0);
@@ -49,7 +64,7 @@ pub(crate) fn init_global_lock(module: &mut Module<impl Backend>, bcx: &mut Func
pub(crate) fn init_global_lock_constructor( pub(crate) fn init_global_lock_constructor(
module: &mut Module<impl Backend>, module: &mut Module<impl Backend>,
constructor_name: &str constructor_name: &str,
) -> FuncId { ) -> FuncId {
let sig = Signature::new(CallConv::SystemV); let sig = Signature::new(CallConv::SystemV);
let init_func_id = module let init_func_id = module
@@ -71,61 +86,99 @@ pub(crate) fn init_global_lock_constructor(
bcx.seal_all_blocks(); bcx.seal_all_blocks();
bcx.finalize(); bcx.finalize();
} }
module.define_function( module
init_func_id, .define_function(
&mut ctx, init_func_id,
&mut cranelift_codegen::binemit::NullTrapSink {}, &mut ctx,
).unwrap(); &mut cranelift_codegen::binemit::NullTrapSink {},
)
.unwrap();
init_func_id init_func_id
} }
pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) { pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
let atomic_mutex = fx.cx.module.declare_data( let atomic_mutex = fx
"__cg_clif_global_atomic_mutex", .cx
Linkage::Import, .module
true, .declare_data(
false, "__cg_clif_global_atomic_mutex",
None, Linkage::Import,
).unwrap(); true,
false,
None,
)
.unwrap();
let pthread_mutex_lock = fx.cx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature { let pthread_mutex_lock = fx
call_conv: fx.cx.module.target_config().default_call_conv, .cx
params: vec![ .module
AbiParam::new(fx.cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */), .declare_function(
], "pthread_mutex_lock",
returns: vec![AbiParam::new(types::I32 /* c_int */)], Linkage::Import,
}).unwrap(); &cranelift_codegen::ir::Signature {
call_conv: fx.cx.module.target_config().default_call_conv,
params: vec![AbiParam::new(
fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
)],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
},
)
.unwrap();
let pthread_mutex_lock = fx.cx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func); let pthread_mutex_lock = fx
.cx
.module
.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func); let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
let atomic_mutex = fx.bcx.ins().global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex); let atomic_mutex = fx
.bcx
.ins()
.global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]); fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
} }
pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) { pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
let atomic_mutex = fx.cx.module.declare_data( let atomic_mutex = fx
"__cg_clif_global_atomic_mutex", .cx
Linkage::Import, .module
true, .declare_data(
false, "__cg_clif_global_atomic_mutex",
None, Linkage::Import,
).unwrap(); true,
false,
None,
)
.unwrap();
let pthread_mutex_unlock = fx.cx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature { let pthread_mutex_unlock = fx
call_conv: fx.cx.module.target_config().default_call_conv, .cx
params: vec![ .module
AbiParam::new(fx.cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */), .declare_function(
], "pthread_mutex_unlock",
returns: vec![AbiParam::new(types::I32 /* c_int */)], Linkage::Import,
}).unwrap(); &cranelift_codegen::ir::Signature {
call_conv: fx.cx.module.target_config().default_call_conv,
params: vec![AbiParam::new(
fx.cx.module.target_config().pointer_type(), /* *mut pthread_mutex_t */
)],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
},
)
.unwrap();
let pthread_mutex_unlock = fx.cx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func); let pthread_mutex_unlock = fx
.cx
.module
.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func); let atomic_mutex = fx.cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
let atomic_mutex = fx.bcx.ins().global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex); let atomic_mutex = fx
.bcx
.ins()
.global_value(fx.cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]); fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
} }

@@ -5,8 +5,8 @@ use rustc_session::Session;
use cranelift_module::{FuncId, Module}; use cranelift_module::{FuncId, Module};
use object::{SectionKind, SymbolFlags, RelocationKind, RelocationEncoding};
use object::write::*; use object::write::*;
use object::{RelocationEncoding, RelocationKind, SectionKind, SymbolFlags};
use cranelift_object::{ObjectBackend, ObjectBuilder, ObjectProduct}; use cranelift_object::{ObjectBackend, ObjectBuilder, ObjectProduct};
@@ -20,7 +20,9 @@ pub(crate) trait WriteMetadata {
impl WriteMetadata for object::write::Object { impl WriteMetadata for object::write::Object {
fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) { fn add_rustc_section(&mut self, symbol_name: String, data: Vec<u8>, _is_like_osx: bool) {
let segment = self.segment_name(object::write::StandardSegment::Data).to_vec(); let segment = self
.segment_name(object::write::StandardSegment::Data)
.to_vec();
let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data); let section_id = self.add_section(segment, b".rustc".to_vec(), object::SectionKind::Data);
let offset = self.append_section_data(section_id, &data, 1); let offset = self.append_section_data(section_id, &data, 1);
// For MachO and probably PE this is necessary to prevent the linker from throwing away the // For MachO and probably PE this is necessary to prevent the linker from throwing away the
@@ -62,7 +64,8 @@ impl WriteDebugInfo for ObjectProduct {
id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info id.name().replace('.', "__") // machO expects __debug_info instead of .debug_info
} else { } else {
id.name().to_string() id.name().to_string()
}.into_bytes(); }
.into_bytes();
let segment = self.object.segment_name(StandardSegment::Debug).to_vec(); let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
let section_id = self.object.add_section(segment, name, SectionKind::Debug); let section_id = self.object.add_section(segment, name, SectionKind::Debug);
@@ -78,22 +81,27 @@ impl WriteDebugInfo for ObjectProduct {
reloc: &DebugReloc, reloc: &DebugReloc,
) { ) {
let (symbol, symbol_offset) = match reloc.name { let (symbol, symbol_offset) = match reloc.name {
DebugRelocName::Section(id) => { DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
(section_map.get(&id).unwrap().1, 0)
}
DebugRelocName::Symbol(id) => { DebugRelocName::Symbol(id) => {
let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap())); let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
self.object.symbol_section_and_offset(symbol_id).expect("Debug reloc for undef sym???") self.object
.symbol_section_and_offset(symbol_id)
.expect("Debug reloc for undef sym???")
} }
}; };
self.object.add_relocation(from.0, Relocation { self.object
offset: u64::from(reloc.offset), .add_relocation(
symbol, from.0,
kind: RelocationKind::Absolute, Relocation {
encoding: RelocationEncoding::Generic, offset: u64::from(reloc.offset),
size: reloc.size * 8, symbol,
addend: i64::try_from(symbol_offset).unwrap() + reloc.addend, kind: RelocationKind::Absolute,
}).unwrap(); encoding: RelocationEncoding::Generic,
size: reloc.size * 8,
addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
},
)
.unwrap();
} }
} }
@@ -105,21 +113,32 @@ pub(crate) trait AddConstructor {
impl AddConstructor for ObjectProduct { impl AddConstructor for ObjectProduct {
fn add_constructor(&mut self, func_id: FuncId) { fn add_constructor(&mut self, func_id: FuncId) {
let symbol = self.function_symbol(func_id); let symbol = self.function_symbol(func_id);
let segment = self.object.segment_name(object::write::StandardSegment::Data); let segment = self
let init_array_section = self.object.add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data); .object
.segment_name(object::write::StandardSegment::Data);
let init_array_section =
self.object
.add_section(segment.to_vec(), b".init_array".to_vec(), SectionKind::Data);
self.object.append_section_data( self.object.append_section_data(
init_array_section, init_array_section,
&std::iter::repeat(0).take(8 /*FIXME pointer size*/).collect::<Vec<u8>>(), &std::iter::repeat(0)
.take(8 /*FIXME pointer size*/)
.collect::<Vec<u8>>(),
8, 8,
); );
self.object.add_relocation(init_array_section, object::write::Relocation { self.object
offset: 0, .add_relocation(
size: 64, // FIXME pointer size init_array_section,
kind: RelocationKind::Absolute, object::write::Relocation {
encoding: RelocationEncoding::Generic, offset: 0,
symbol, size: 64, // FIXME pointer size
addend: 0, kind: RelocationKind::Absolute,
}).unwrap(); encoding: RelocationEncoding::Generic,
symbol,
addend: 0,
},
)
.unwrap();
} }
} }
@@ -153,7 +172,7 @@ pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object
architecture => sess.fatal(&format!( architecture => sess.fatal(&format!(
"target architecture {:?} is unsupported", "target architecture {:?} is unsupported",
architecture, architecture,
)) )),
}; };
let endian = match triple.endianness().unwrap() { let endian = match triple.endianness().unwrap() {
target_lexicon::Endianness::Little => object::Endianness::Little, target_lexicon::Endianness::Little => object::Endianness::Little,
@@ -166,7 +185,8 @@ pub(crate) fn with_object(sess: &Session, name: &str, f: impl FnOnce(&mut Object
metadata_object.write().unwrap() metadata_object.write().unwrap()
} }
pub(crate) type Backend = impl cranelift_module::Backend<Product: AddConstructor + Emit + WriteDebugInfo>; pub(crate) type Backend =
impl cranelift_module::Backend<Product: AddConstructor + Emit + WriteDebugInfo>;
pub(crate) fn make_module(sess: &Session, name: String) -> Module<Backend> { pub(crate) fn make_module(sess: &Session, name: String) -> Module<Backend> {
let module: Module<ObjectBackend> = Module::new( let module: Module<ObjectBackend> = Module::new(
@@ -174,7 +194,8 @@ pub(crate) fn make_module(sess: &Session, name: String) -> Module<Backend> {
crate::build_isa(sess, true), crate::build_isa(sess, true),
name + ".o", name + ".o",
cranelift_module::default_libcall_names(), cranelift_module::default_libcall_names(),
).unwrap(), )
.unwrap(),
); );
module module
} }

@@ -1,5 +1,5 @@
use rustc_middle::ty::adjustment::PointerCast;
use rustc_index::vec::IndexVec; use rustc_index::vec::IndexVec;
use rustc_middle::ty::adjustment::PointerCast;
use crate::prelude::*; use crate::prelude::*;
@@ -29,7 +29,9 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
// Predefine blocks // Predefine blocks
let start_block = bcx.create_block(); let start_block = bcx.create_block();
let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect(); let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len())
.map(|_| bcx.create_block())
.collect();
// Make FunctionCx // Make FunctionCx
let pointer_type = cx.module.target_config().pointer_type(); let pointer_type = cx.module.target_config().pointer_type();
@@ -56,15 +58,22 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
inline_asm_index: 0, inline_asm_index: 0,
}; };
let arg_uninhabited = fx.mir.args_iter().any(|arg| fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty)).abi.is_uninhabited()); let arg_uninhabited = fx.mir.args_iter().any(|arg| {
fx.layout_of(fx.monomorphize(&fx.mir.local_decls[arg].ty))
.abi
.is_uninhabited()
});
if arg_uninhabited { if arg_uninhabited {
fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]); fx.bcx
.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
fx.bcx.switch_to_block(fx.block_map[START_BLOCK]); fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument"); crate::trap::trap_unreachable(&mut fx, "function has uninhabited argument");
} else { } else {
tcx.sess.time("codegen clif ir", || { tcx.sess.time("codegen clif ir", || {
tcx.sess.time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block)); tcx.sess.time("codegen prelude", || {
crate::abi::codegen_fn_prelude(&mut fx, start_block)
});
codegen_fn_content(&mut fx); codegen_fn_content(&mut fx);
}); });
} }
@@ -80,21 +89,20 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
let context = &mut cx.cached_context; let context = &mut cx.cached_context;
context.func = func; context.func = func;
crate::pretty_clif::write_clif_file( crate::pretty_clif::write_clif_file(tcx, "unopt", None, instance, &context, &clif_comments);
tcx,
"unopt",
None,
instance,
&context,
&clif_comments,
);
// Verify function // Verify function
verify_func(tcx, &clif_comments, &context.func); verify_func(tcx, &clif_comments, &context.func);
// Perform rust specific optimizations // Perform rust specific optimizations
tcx.sess.time("optimize clif ir", || { tcx.sess.time("optimize clif ir", || {
crate::optimize::optimize_function(tcx, instance, context, &cold_blocks, &mut clif_comments); crate::optimize::optimize_function(
tcx,
instance,
context,
&cold_blocks,
&mut clif_comments,
);
}); });
// If the return block is not reachable, then the SSA builder may have inserted a `iconst.i128` // If the return block is not reachable, then the SSA builder may have inserted a `iconst.i128`
@@ -105,14 +113,15 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
// Define function // Define function
let module = &mut cx.module; let module = &mut cx.module;
tcx.sess.time( tcx.sess.time("define function", || {
"define function", module
|| module.define_function( .define_function(
func_id, func_id,
context, context,
&mut cranelift_codegen::binemit::NullTrapSink {}, &mut cranelift_codegen::binemit::NullTrapSink {},
).unwrap(), )
); .unwrap()
});
// Write optimized function to file for debugging // Write optimized function to file for debugging
crate::pretty_clif::write_clif_file( crate::pretty_clif::write_clif_file(
@@ -130,7 +139,15 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
let unwind_context = &mut cx.unwind_context; let unwind_context = &mut cx.unwind_context;
tcx.sess.time("generate debug info", || { tcx.sess.time("generate debug info", || {
if let Some(debug_context) = debug_context { if let Some(debug_context) = debug_context {
debug_context.define_function(instance, func_id, &name, isa, context, &source_info_set, local_map); debug_context.define_function(
instance,
func_id,
&name,
isa,
context,
&source_info_set,
local_map,
);
} }
unwind_context.add_function(func_id, &context, isa); unwind_context.add_function(func_id, &context, isa);
}); });
@@ -139,7 +156,11 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
context.clear(); context.clear();
} }
pub(crate) fn verify_func(tcx: TyCtxt<'_>, writer: &crate::pretty_clif::CommentWriter, func: &Function) { pub(crate) fn verify_func(
tcx: TyCtxt<'_>,
writer: &crate::pretty_clif::CommentWriter,
func: &Function,
) {
tcx.sess.time("verify clif ir", || { tcx.sess.time("verify clif ir", || {
let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder()); let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
match cranelift_codegen::verify_function(&func, &flags) { match cranelift_codegen::verify_function(&func, &flags) {
@@ -249,7 +270,9 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
fx.bcx.switch_to_block(failure); fx.bcx.switch_to_block(failure);
let location = fx.get_caller_location(bb_data.terminator().source_info.span).load_scalar(fx); let location = fx
.get_caller_location(bb_data.terminator().source_info.span)
.load_scalar(fx);
let args; let args;
let lang_item = match msg { let lang_item = match msg {
@@ -262,20 +285,30 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
_ => { _ => {
let msg_str = msg.description(); let msg_str = msg.description();
let msg_ptr = fx.anonymous_str("assert", msg_str); let msg_ptr = fx.anonymous_str("assert", msg_str);
let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap()); let msg_len = fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
args = [msg_ptr, msg_len, location]; args = [msg_ptr, msg_len, location];
rustc_hir::lang_items::PanicFnLangItem rustc_hir::lang_items::PanicFnLangItem
} }
}; };
let def_id = fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| { let def_id = fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| {
fx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, &s) fx.tcx
.sess
.span_fatal(bb_data.terminator().source_info.span, &s)
}); });
let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx); let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
let symbol_name = fx.tcx.symbol_name(instance).name; let symbol_name = fx.tcx.symbol_name(instance).name;
fx.lib_call(&*symbol_name, vec![fx.pointer_type, fx.pointer_type, fx.pointer_type], vec![], &args); fx.lib_call(
&*symbol_name,
vec![fx.pointer_type, fx.pointer_type, fx.pointer_type],
vec![],
&args,
);
crate::trap::trap_unreachable(fx, "panic lang item returned"); crate::trap::trap_unreachable(fx, "panic lang item returned");
} }
@@ -303,14 +336,16 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
cleanup: _, cleanup: _,
from_hir_call: _, from_hir_call: _,
} => { } => {
fx.tcx.sess.time("codegen call", || crate::abi::codegen_terminator_call( fx.tcx.sess.time("codegen call", || {
fx, crate::abi::codegen_terminator_call(
*fn_span, fx,
block, *fn_span,
func, block,
args, func,
*destination, args,
)); *destination,
)
});
} }
TerminatorKind::InlineAsm { TerminatorKind::InlineAsm {
template, template,
@@ -333,7 +368,10 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
fx.bcx.ins().jump(destination_block, &[]); fx.bcx.ins().jump(destination_block, &[]);
} }
None => { None => {
crate::trap::trap_unreachable(fx, "[corruption] Returned from noreturn inline asm"); crate::trap::trap_unreachable(
fx,
"[corruption] Returned from noreturn inline asm",
);
} }
} }
} }
@@ -370,8 +408,7 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
fn trans_stmt<'tcx>( fn trans_stmt<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>, fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
#[allow(unused_variables)] #[allow(unused_variables)] cur_block: Block,
cur_block: Block,
stmt: &Statement<'tcx>, stmt: &Statement<'tcx>,
) { ) {
let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt)); let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
@@ -439,30 +476,24 @@ fn trans_stmt<'tcx>(
let layout = operand.layout(); let layout = operand.layout();
let val = operand.load_scalar(fx); let val = operand.load_scalar(fx);
let res = match un_op { let res = match un_op {
UnOp::Not => { UnOp::Not => match layout.ty.kind {
match layout.ty.kind { ty::Bool => {
ty::Bool => { let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0); CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
}
ty::Uint(_) | ty::Int(_) => {
CValue::by_val(fx.bcx.ins().bnot(val), layout)
}
_ => unreachable!("un op Not for {:?}", layout.ty),
} }
} ty::Uint(_) | ty::Int(_) => {
CValue::by_val(fx.bcx.ins().bnot(val), layout)
}
_ => unreachable!("un op Not for {:?}", layout.ty),
},
UnOp::Neg => match layout.ty.kind { UnOp::Neg => match layout.ty.kind {
ty::Int(IntTy::I128) => { ty::Int(IntTy::I128) => {
// FIXME remove this case once ineg.i128 works // FIXME remove this case once ineg.i128 works
let zero = CValue::const_val(fx, layout, 0); let zero = CValue::const_val(fx, layout, 0);
crate::num::trans_int_binop(fx, BinOp::Sub, zero, operand) crate::num::trans_int_binop(fx, BinOp::Sub, zero, operand)
} }
ty::Int(_) => { ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
CValue::by_val(fx.bcx.ins().ineg(val), layout) ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
}
ty::Float(_) => {
CValue::by_val(fx.bcx.ins().fneg(val), layout)
}
_ => unreachable!("un op Neg for {:?}", layout.ty), _ => unreachable!("un op Neg for {:?}", layout.ty),
}, },
}; };
@@ -474,9 +505,14 @@ fn trans_stmt<'tcx>(
match from_ty.kind { match from_ty.kind {
ty::FnDef(def_id, substs) => { ty::FnDef(def_id, substs) => {
let func_ref = fx.get_function_ref( let func_ref = fx.get_function_ref(
Instance::resolve_for_fn_ptr(fx.tcx, ParamEnv::reveal_all(), def_id, substs) Instance::resolve_for_fn_ptr(
.unwrap() fx.tcx,
.polymorphize(fx.tcx), ParamEnv::reveal_all(),
def_id,
substs,
)
.unwrap()
.polymorphize(fx.tcx),
); );
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref); let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout)); lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
@@ -505,7 +541,9 @@ fn trans_stmt<'tcx>(
|ty::TypeAndMut { |ty::TypeAndMut {
ty: pointee_ty, ty: pointee_ty,
mutbl: _, mutbl: _,
}| has_ptr_meta(fx.tcx, pointee_ty), }| {
has_ptr_meta(fx.tcx, pointee_ty)
},
) )
.unwrap_or(false) .unwrap_or(false)
} }
@@ -527,13 +565,20 @@ fn trans_stmt<'tcx>(
_ => unreachable!("cast adt {} -> {}", from_ty, to_ty), _ => unreachable!("cast adt {} -> {}", from_ty, to_ty),
} }
use rustc_target::abi::{TagEncoding, Int, Variants}; use rustc_target::abi::{Int, TagEncoding, Variants};
match &operand.layout().variants { match &operand.layout().variants {
Variants::Single { index } => { Variants::Single { index } => {
let discr = operand.layout().ty.discriminant_for_variant(fx.tcx, *index).unwrap(); let discr = operand
.layout()
.ty
.discriminant_for_variant(fx.tcx, *index)
.unwrap();
let discr = if discr.ty.is_signed() { let discr = if discr.ty.is_signed() {
rustc_middle::mir::interpret::sign_extend(discr.val, fx.layout_of(discr.ty).size) rustc_middle::mir::interpret::sign_extend(
discr.val,
fx.layout_of(discr.ty).size,
)
} else { } else {
discr.val discr.val
}; };
@@ -550,7 +595,8 @@ fn trans_stmt<'tcx>(
let cast_to = fx.clif_type(dest_layout.ty).unwrap(); let cast_to = fx.clif_type(dest_layout.ty).unwrap();
// Read the tag/niche-encoded discriminant from memory. // Read the tag/niche-encoded discriminant from memory.
let encoded_discr = operand.value_field(fx, mir::Field::new(*tag_field)); let encoded_discr =
operand.value_field(fx, mir::Field::new(*tag_field));
let encoded_discr = encoded_discr.load_scalar(fx); let encoded_discr = encoded_discr.load_scalar(fx);
// Decode the discriminant (specifically if it's niche-encoded). // Decode the discriminant (specifically if it's niche-encoded).
@@ -562,7 +608,7 @@ fn trans_stmt<'tcx>(
let val = CValue::by_val(val, dest_layout); let val = CValue::by_val(val, dest_layout);
lval.write_cvalue(fx, val); lval.write_cvalue(fx, val);
} }
Variants::Multiple { ..} => unreachable!(), Variants::Multiple { .. } => unreachable!(),
} }
} else { } else {
let to_clif_ty = fx.clif_type(to_ty).unwrap(); let to_clif_ty = fx.clif_type(to_ty).unwrap();
@@ -578,7 +624,11 @@ fn trans_stmt<'tcx>(
lval.write_cvalue(fx, CValue::by_val(res, dest_layout)); lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
} }
} }
Rvalue::Cast(CastKind::Pointer(PointerCast::ClosureFnPointer(_)), operand, _to_ty) => { Rvalue::Cast(
CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
operand,
_to_ty,
) => {
let operand = trans_operand(fx, operand); let operand = trans_operand(fx, operand);
match operand.layout().ty.kind { match operand.layout().ty.kind {
ty::Closure(def_id, substs) => { ty::Closure(def_id, substs) => {
@@ -587,7 +637,8 @@ fn trans_stmt<'tcx>(
def_id, def_id,
substs, substs,
ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce,
).polymorphize(fx.tcx); )
.polymorphize(fx.tcx);
let func_ref = fx.get_function_ref(instance); let func_ref = fx.get_function_ref(instance);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref); let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout())); lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
@@ -660,7 +711,8 @@ fn trans_stmt<'tcx>(
.ty .ty
.is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())); .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes(); let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into()); let val =
CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), ty_size.into());
lval.write_cvalue(fx, val); lval.write_cvalue(fx, val);
} }
Rvalue::Aggregate(kind, operands) => match **kind { Rvalue::Aggregate(kind, operands) => match **kind {
@@ -691,12 +743,12 @@ fn trans_stmt<'tcx>(
inputs, inputs,
} = &**asm; } = &**asm;
let rustc_hir::LlvmInlineAsmInner { let rustc_hir::LlvmInlineAsmInner {
asm: asm_code, // Name asm: asm_code, // Name
outputs: output_names, // Vec<LlvmInlineAsmOutput> outputs: output_names, // Vec<LlvmInlineAsmOutput>
inputs: input_names, // Vec<Name> inputs: input_names, // Vec<Name>
clobbers, // Vec<Name> clobbers, // Vec<Name>
volatile, // bool volatile, // bool
alignstack, // bool alignstack, // bool
dialect: _, dialect: _,
asm_str_style: _, asm_str_style: _,
} = asm; } = asm;
@@ -705,9 +757,15 @@ fn trans_stmt<'tcx>(
// Black box // Black box
} }
"mov %rbx, %rsi\n cpuid\n xchg %rbx, %rsi" => { "mov %rbx, %rsi\n cpuid\n xchg %rbx, %rsi" => {
assert_eq!(input_names, &[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]); assert_eq!(
input_names,
&[Symbol::intern("{eax}"), Symbol::intern("{ecx}")]
);
assert_eq!(output_names.len(), 4); assert_eq!(output_names.len(), 4);
for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"]).iter().enumerate() { for (i, c) in (&["={eax}", "={esi}", "={ecx}", "={edx}"])
.iter()
.enumerate()
{
assert_eq!(&output_names[i].constraint.as_str(), c); assert_eq!(&output_names[i].constraint.as_str(), c);
assert!(!output_names[i].is_rw); assert!(!output_names[i].is_rw);
assert!(!output_names[i].is_indirect); assert!(!output_names[i].is_indirect);
@@ -722,13 +780,18 @@ fn trans_stmt<'tcx>(
let leaf = trans_operand(fx, &inputs[0].1).load_scalar(fx); // %eax let leaf = trans_operand(fx, &inputs[0].1).load_scalar(fx); // %eax
let subleaf = trans_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx let subleaf = trans_operand(fx, &inputs[1].1).load_scalar(fx); // %ecx
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf); let (eax, ebx, ecx, edx) =
crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
assert_eq!(outputs.len(), 4); assert_eq!(outputs.len(), 4);
trans_place(fx, outputs[0]).write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32))); trans_place(fx, outputs[0])
trans_place(fx, outputs[1]).write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32))); .write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
trans_place(fx, outputs[2]).write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32))); trans_place(fx, outputs[1])
trans_place(fx, outputs[3]).write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32))); .write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
trans_place(fx, outputs[2])
.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
trans_place(fx, outputs[3])
.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
} }
"xgetbv" => { "xgetbv" => {
assert_eq!(input_names, &[Symbol::intern("{ecx}")]); assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
@@ -748,7 +811,12 @@ fn trans_stmt<'tcx>(
crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported"); crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
} }
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
_ if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => { _ if fx
.tcx
.symbol_name(fx.instance)
.name
.starts_with("___chkstk") =>
{
crate::trap::trap_unimplemented(fx, "Stack probes are not supported"); crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
} }
_ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => { _ if fx.tcx.symbol_name(fx.instance).name == "__alloca" => {
@@ -758,7 +826,10 @@ fn trans_stmt<'tcx>(
"int $$0x29" => { "int $$0x29" => {
crate::trap::trap_unimplemented(fx, "Windows abort"); crate::trap::trap_unimplemented(fx, "Windows abort");
} }
_ => fx.tcx.sess.span_fatal(stmt.source_info.span, "Inline assembly is not supported"), _ => fx
.tcx
.sess
.span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
} }
} }
StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"), StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
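Aside on the cpuid branch in the hunk above: the hard-coded asm template is recognized so the call can be routed to codegen_cpuid_call instead of real inline asm, with the leaf taken from %eax and the subleaf from %ecx. For reference only, the same leaf/subleaf query on a native x86_64 host goes through std::arch; this is a hedged illustration of what the emulated instruction computes, not how cg_clif lowers it:

    // Only meaningful on an x86_64 host; other targets get an empty main.
    #[cfg(target_arch = "x86_64")]
    fn main() {
        // Leaf 0: eax holds the highest supported basic leaf, and the vendor
        // string is spread across ebx, edx, ecx.
        let r = unsafe { std::arch::x86_64::__cpuid_count(0, 0) };
        println!("max basic leaf: {}", r.eax);
        println!("vendor regs: {:08x} {:08x} {:08x}", r.ebx, r.edx, r.ecx);
    }

    #[cfg(not(target_arch = "x86_64"))]
    fn main() {}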
@@ -771,7 +842,8 @@ fn codegen_array_len<'tcx>(
) -> Value { ) -> Value {
match place.layout().ty.kind { match place.layout().ty.kind {
ty::Array(_elem_ty, len) => { ty::Array(_elem_ty, len) => {
let len = fx.monomorphize(&len) let len = fx
.monomorphize(&len)
.eval(fx.tcx, ParamEnv::reveal_all()) .eval(fx.tcx, ParamEnv::reveal_all())
.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64; .eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
fx.bcx.ins().iconst(fx.pointer_type, len) fx.bcx.ins().iconst(fx.pointer_type, len)
@@ -836,7 +908,9 @@ pub(crate) fn trans_place<'tcx>(
let len = len.unwrap(); let len = len.unwrap();
cplace = CPlace::for_ptr_with_extra( cplace = CPlace::for_ptr_with_extra(
ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * i64::from(from)), ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * i64::from(from)),
fx.bcx.ins().iadd_imm(len, -(i64::from(from) + i64::from(to))), fx.bcx
.ins()
.iadd_imm(len, -(i64::from(from) + i64::from(to))),
cplace.layout(), cplace.layout(),
); );
} }


@@ -98,7 +98,7 @@ pub(super) fn maybe_codegen<'tcx>(
// Optimize `val >> 64`, because compiler_builtins uses it to deconstruct an 128bit // Optimize `val >> 64`, because compiler_builtins uses it to deconstruct an 128bit
// integer into its lsb and msb. // integer into its lsb and msb.
// https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217 // https://github.com/rust-lang-nursery/compiler-builtins/blob/79a6a1603d5672cbb9187ff41ff4d9b5048ac1cb/src/int/mod.rs#L217
if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) { if resolve_value_imm(fx.bcx.func, rhs_val) == Some(64) {
let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val); let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs_val);
let all_zeros = fx.bcx.ins().iconst(types::I64, 0); let all_zeros = fx.bcx.ins().iconst(types::I64, 0);
let val = match (bin_op, is_signed) { let val = match (bin_op, is_signed) {
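Aside on the hunk above: the special case only fires when the shift amount is the constant 64, because for a 128-bit value a logical right shift by 64 just moves the high half into the low half. A plain-Rust sketch of the identity the isplit-based lowering relies on (no Cranelift involved):

    use std::convert::TryInto;

    fn lshr_by_64(val: u128) -> u128 {
        let bytes = val.to_le_bytes();
        let lsb = u64::from_le_bytes(bytes[0..8].try_into().unwrap()); // low half, discarded
        let msb = u64::from_le_bytes(bytes[8..16].try_into().unwrap()); // high half
        let _ = lsb;
        u128::from(msb) // the new high half is all zeros
    }

    fn main() {
        let x: u128 = 0x1234_5678_9abc_def0_0fed_cba9_8765_4321;
        assert_eq!(lshr_by_64(x), x >> 64);
    }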


@@ -1,6 +1,6 @@
use rustc_index::vec::IndexVec;
use rustc_target::abi::{Integer, Primitive}; use rustc_target::abi::{Integer, Primitive};
use rustc_target::spec::{HasTargetSpec, Target}; use rustc_target::spec::{HasTargetSpec, Target};
use rustc_index::vec::IndexVec;
use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef}; use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
@@ -55,7 +55,11 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
FloatTy::F64 => types::F64, FloatTy::F64 => types::F64,
}, },
ty::FnPtr(_) => pointer_ty(tcx), ty::FnPtr(_) => pointer_ty(tcx),
ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => { ty::RawPtr(TypeAndMut {
ty: pointee_ty,
mutbl: _,
})
| ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, pointee_ty) { if has_ptr_meta(tcx, pointee_ty) {
return None; return None;
} else { } else {
@@ -63,7 +67,8 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
} }
} }
ty::Adt(adt_def, _) if adt_def.repr.simd() => { ty::Adt(adt_def, _) if adt_def.repr.simd() => {
let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi { let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
{
Abi::Vector { element, count } => (element.clone(), *count), Abi::Vector { element, count } => (element.clone(), *count),
_ => unreachable!(), _ => unreachable!(),
}; };
@@ -79,7 +84,10 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
}) })
} }
fn clif_pair_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<(types::Type, types::Type)> { fn clif_pair_type_from_ty<'tcx>(
tcx: TyCtxt<'tcx>,
ty: Ty<'tcx>,
) -> Option<(types::Type, types::Type)> {
Some(match ty.kind { Some(match ty.kind {
ty::Tuple(substs) if substs.len() == 2 => { ty::Tuple(substs) if substs.len() == 2 => {
let mut types = substs.types(); let mut types = substs.types();
@@ -90,11 +98,15 @@ fn clif_pair_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<(type
} }
(a, b) (a, b)
} }
ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => { ty::RawPtr(TypeAndMut {
ty: pointee_ty,
mutbl: _,
})
| ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, pointee_ty) { if has_ptr_meta(tcx, pointee_ty) {
(pointer_ty(tcx), pointer_ty(tcx)) (pointer_ty(tcx), pointer_ty(tcx))
} else { } else {
return None return None;
} }
} }
_ => return None, _ => return None,
@@ -103,8 +115,15 @@ fn clif_pair_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<(type
/// Is a pointer to this type a fat ptr? /// Is a pointer to this type a fat ptr?
pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool { pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not }); let ptr_ty = tcx.mk_ptr(TypeAndMut {
match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi { ty,
mutbl: rustc_hir::Mutability::Not,
});
match &tcx
.layout_of(ParamEnv::reveal_all().and(ptr_ty))
.unwrap()
.abi
{
Abi::Scalar(_) => false, Abi::Scalar(_) => false,
Abi::ScalarPair(_, _) => true, Abi::ScalarPair(_, _) => true,
abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi), abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
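Aside on has_ptr_meta above: a pointer to a type is fat exactly when the pointer value carries metadata (a slice length or a vtable pointer) next to the address, which is why a ScalarPair ABI is the signal. The same fact observed from plain Rust:

    use std::mem::size_of;

    fn main() {
        assert_eq!(size_of::<&u32>(), size_of::<usize>());                     // thin pointer
        assert_eq!(size_of::<&[u32]>(), 2 * size_of::<usize>());               // fat: address + length
        assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // fat: address + vtable
    }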
@@ -200,7 +219,11 @@ pub(crate) fn resolve_value_imm(func: &Function, val: Value) -> Option<u128> {
} }
} }
pub(crate) fn type_min_max_value(bcx: &mut FunctionBuilder<'_>, ty: Type, signed: bool) -> (Value, Value) { pub(crate) fn type_min_max_value(
bcx: &mut FunctionBuilder<'_>,
ty: Type,
signed: bool,
) -> (Value, Value) {
assert!(ty.is_int()); assert!(ty.is_int());
if ty == types::I128 { if ty == types::I128 {
@@ -339,13 +362,11 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
T: TypeFoldable<'tcx> + Copy, T: TypeFoldable<'tcx> + Copy,
{ {
if let Some(substs) = self.instance.substs_for_mir_body() { if let Some(substs) = self.instance.substs_for_mir_body() {
self.tcx.subst_and_normalize_erasing_regions( self.tcx
substs, .subst_and_normalize_erasing_regions(substs, ty::ParamEnv::reveal_all(), value)
ty::ParamEnv::reveal_all(),
value,
)
} else { } else {
self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value) self.tcx
.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
} }
} }
@@ -385,11 +406,7 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
caller.line as u32, caller.line as u32,
caller.col_display as u32 + 1, caller.col_display as u32 + 1,
)); ));
crate::constant::trans_const_value( crate::constant::trans_const_value(self, const_loc, self.tcx.caller_location_ty())
self,
const_loc,
self.tcx.caller_location_ty(),
)
} }
pub(crate) fn triple(&self) -> &target_lexicon::Triple { pub(crate) fn triple(&self) -> &target_lexicon::Triple {
@@ -406,7 +423,8 @@ impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
let mut data_ctx = DataContext::new(); let mut data_ctx = DataContext::new();
data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice()); data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
let msg_id = self let msg_id = self
.cx.module .cx
.module
.declare_data( .declare_data(
&format!("__{}_{:08x}", prefix, msg_hash), &format!("__{}_{:08x}", prefix, msg_hash),
Linkage::Local, Linkage::Local,


@@ -1,5 +1,6 @@
use rustc_span::DUMMY_SP; use rustc_span::DUMMY_SP;
use rustc_data_structures::fx::FxHashSet;
use rustc_errors::ErrorReported; use rustc_errors::ErrorReported;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{ use rustc_middle::mir::interpret::{
@@ -7,7 +8,6 @@ use rustc_middle::mir::interpret::{
}; };
use rustc_middle::ty::{Const, ConstKind}; use rustc_middle::ty::{Const, ConstKind};
use rustc_target::abi::Align; use rustc_target::abi::Align;
use rustc_data_structures::fx::FxHashSet;
use cranelift_codegen::ir::GlobalValueData; use cranelift_codegen::ir::GlobalValueData;
use cranelift_module::*; use cranelift_module::*;
@@ -41,19 +41,31 @@ pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, impl Backend>) {
match const_.val { match const_.val {
ConstKind::Value(_) => {} ConstKind::Value(_) => {}
ConstKind::Unevaluated(def, ref substs, promoted) => { ConstKind::Unevaluated(def, ref substs, promoted) => {
if let Err(err) = fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) { if let Err(err) =
fx.tcx
.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
{
match err { match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => { ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
fx.tcx.sess.span_err(constant.span, "erroneous constant encountered"); fx.tcx
.sess
.span_err(constant.span, "erroneous constant encountered");
} }
ErrorHandled::TooGeneric => { ErrorHandled::TooGeneric => {
span_bug!(constant.span, "codgen encountered polymorphic constant: {:?}", err); span_bug!(
constant.span,
"codgen encountered polymorphic constant: {:?}",
err
);
} }
} }
} }
} }
ConstKind::Param(_) | ConstKind::Infer(_) | ConstKind::Bound(_, _) ConstKind::Param(_)
| ConstKind::Placeholder(_) | ConstKind::Error(_) => unreachable!("{:?}", const_), | ConstKind::Infer(_)
| ConstKind::Bound(_, _)
| ConstKind::Placeholder(_)
| ConstKind::Error(_) => unreachable!("{:?}", const_),
} }
} }
} }
@@ -86,7 +98,10 @@ fn codegen_static_ref<'tcx>(
fx.add_comment(local_data_id, format!("{:?}", def_id)); fx.add_comment(local_data_id, format!("{:?}", def_id));
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id); let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
assert!(!layout.is_unsized(), "unsized statics aren't supported"); assert!(!layout.is_unsized(), "unsized statics aren't supported");
assert!(matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}), "tls static referenced without Rvalue::ThreadLocalRef"); assert!(
matches!(fx.bcx.func.global_values[local_data_id], GlobalValueData::Symbol { tls: false, ..}),
"tls static referenced without Rvalue::ThreadLocalRef"
);
CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout) CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
} }
@@ -105,14 +120,20 @@ pub(crate) fn trans_constant<'tcx>(
fx, fx,
def.did, def.did,
fx.layout_of(fx.monomorphize(&constant.literal.ty)), fx.layout_of(fx.monomorphize(&constant.literal.ty)),
).to_cvalue(fx); )
.to_cvalue(fx);
} }
ConstKind::Unevaluated(def, ref substs, promoted) => { ConstKind::Unevaluated(def, ref substs, promoted) => {
match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) { match fx
.tcx
.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None)
{
Ok(const_val) => const_val, Ok(const_val) => const_val,
Err(_) => { Err(_) => {
if promoted.is_none() { if promoted.is_none() {
fx.tcx.sess.span_err(constant.span, "erroneous constant encountered"); fx.tcx
.sess
.span_err(constant.span, "erroneous constant encountered");
} }
return crate::trap::trap_unreachable_ret_value( return crate::trap::trap_unreachable_ret_value(
fx, fx,
@@ -122,8 +143,11 @@ pub(crate) fn trans_constant<'tcx>(
} }
} }
} }
ConstKind::Param(_) | ConstKind::Infer(_) | ConstKind::Bound(_, _) ConstKind::Param(_)
| ConstKind::Placeholder(_) | ConstKind::Error(_) => unreachable!("{:?}", const_), | ConstKind::Infer(_)
| ConstKind::Bound(_, _)
| ConstKind::Placeholder(_)
| ConstKind::Error(_) => unreachable!("{:?}", const_),
}; };
trans_const_value(fx, const_val, const_.ty) trans_const_value(fx, const_val, const_.ty)
@@ -132,7 +156,7 @@ pub(crate) fn trans_constant<'tcx>(
pub(crate) fn trans_const_value<'tcx>( pub(crate) fn trans_const_value<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>, fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
const_val: ConstValue<'tcx>, const_val: ConstValue<'tcx>,
ty: Ty<'tcx> ty: Ty<'tcx>,
) -> CValue<'tcx> { ) -> CValue<'tcx> {
let layout = fx.layout_of(ty); let layout = fx.layout_of(ty);
assert!(!layout.is_unsized(), "sized const value"); assert!(!layout.is_unsized(), "sized const value");
@@ -149,7 +173,9 @@ pub(crate) fn trans_const_value<'tcx>(
if fx.clif_type(layout.ty).is_none() { if fx.clif_type(layout.ty).is_none() {
let (size, align) = (layout.size, layout.align.pref); let (size, align) = (layout.size, layout.align.pref);
let mut alloc = Allocation::from_bytes( let mut alloc = Allocation::from_bytes(
std::iter::repeat(0).take(size.bytes_usize()).collect::<Vec<u8>>(), std::iter::repeat(0)
.take(size.bytes_usize())
.collect::<Vec<u8>>(),
align, align,
); );
let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
@@ -168,44 +194,58 @@ pub(crate) fn trans_const_value<'tcx>(
let base_addr = match alloc_kind { let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => { Some(GlobalAlloc::Memory(alloc)) => {
fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id)); fx.cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
let data_id = data_id_for_alloc_id(&mut fx.cx.module, ptr.alloc_id, alloc.align, alloc.mutability); let data_id = data_id_for_alloc_id(
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func); &mut fx.cx.module,
ptr.alloc_id,
alloc.align,
alloc.mutability,
);
let local_data_id =
fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id)); fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id) fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
} }
Some(GlobalAlloc::Function(instance)) => { Some(GlobalAlloc::Function(instance)) => {
let func_id = crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance); let func_id =
let local_func_id = fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func); crate::abi::import_function(fx.tcx, &mut fx.cx.module, instance);
let local_func_id =
fx.cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id) fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
} }
Some(GlobalAlloc::Static(def_id)) => { Some(GlobalAlloc::Static(def_id)) => {
assert!(fx.tcx.is_static(def_id)); assert!(fx.tcx.is_static(def_id));
let data_id = data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false); let data_id =
let local_data_id = fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func); data_id_for_static(fx.tcx, &mut fx.cx.module, def_id, false);
let local_data_id =
fx.cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id)); fx.add_comment(local_data_id, format!("{:?}", def_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id) fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
} }
None => bug!("missing allocation {:?}", ptr.alloc_id), None => bug!("missing allocation {:?}", ptr.alloc_id),
}; };
let val = fx.bcx.ins().iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap()); let val = fx
.bcx
.ins()
.iadd_imm(base_addr, i64::try_from(ptr.offset.bytes()).unwrap());
return CValue::by_val(val, layout); return CValue::by_val(val, layout);
} }
} }
} }
ConstValue::ByRef { alloc, offset } => { ConstValue::ByRef { alloc, offset } => CValue::by_ref(
CValue::by_ref( pointer_for_allocation(fx, alloc)
pointer_for_allocation(fx, alloc) .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
.offset_i64(fx, i64::try_from(offset.bytes()).unwrap()), layout,
layout, ),
)
}
ConstValue::Slice { data, start, end } => { ConstValue::Slice { data, start, end } => {
let ptr = pointer_for_allocation(fx, data) let ptr = pointer_for_allocation(fx, data)
.offset_i64(fx, i64::try_from(start).unwrap()) .offset_i64(fx, i64::try_from(start).unwrap())
.get_addr(fx); .get_addr(fx);
let len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap()); let len = fx.bcx.ins().iconst(
fx.pointer_type,
i64::try_from(end.checked_sub(start).unwrap()).unwrap(),
);
CValue::by_val_pair(ptr, len, layout) CValue::by_val_pair(ptr, len, layout)
} }
} }
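Aside on the ConstValue::Slice arm above: a slice constant is lowered to a (pointer, length) pair, with the pointer offset by start and the length computed as end - start. The same decomposition in plain Rust (illustrative helper, not the cg_clif API):

    fn slice_parts(data: &[u8], start: usize, end: usize) -> (*const u8, usize) {
        let sub = &data[start..end];
        (sub.as_ptr(), sub.len()) // len is exactly end - start
    }

    fn main() {
        let data = b"hello world";
        let (ptr, len) = slice_parts(data, 6, 11);
        assert_eq!(len, 5);
        assert_eq!(unsafe { std::slice::from_raw_parts(ptr, len) }, b"world");
    }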
@@ -254,7 +294,8 @@ fn data_id_for_static(
crate::linkage::get_static_linkage(tcx, def_id) crate::linkage::get_static_linkage(tcx, def_id)
} else { } else {
if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak) if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
|| rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny) { || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
{
Linkage::Preemptible Linkage::Preemptible
} else { } else {
Linkage::Import Linkage::Import
@@ -309,7 +350,11 @@ fn data_id_for_static(
.unwrap(); .unwrap();
let mut data_ctx = DataContext::new(); let mut data_ctx = DataContext::new();
let data = module.declare_data_in_data(data_id, &mut data_ctx); let data = module.declare_data_in_data(data_id, &mut data_ctx);
data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect()); data_ctx.define(
std::iter::repeat(0)
.take(pointer_ty(tcx).bytes() as usize)
.collect(),
);
data_ctx.write_data_addr(0, data, 0); data_ctx.write_data_addr(0, data, 0);
match module.define_data(ref_data_id, &data_ctx) { match module.define_data(ref_data_id, &data_ctx) {
// Every time the static is referenced there will be another definition of this global, // Every time the static is referenced there will be another definition of this global,
@@ -338,7 +383,10 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut Module<impl Backend>, cx: &mu
TodoItem::Static(def_id) => { TodoItem::Static(def_id) => {
//println!("static {:?}", def_id); //println!("static {:?}", def_id);
let section_name = tcx.codegen_fn_attrs(def_id).link_section.map(|s| s.as_str()); let section_name = tcx
.codegen_fn_attrs(def_id)
.link_section
.map(|s| s.as_str());
let const_ = tcx.const_eval_poly(def_id).unwrap(); let const_ = tcx.const_eval_poly(def_id).unwrap();
@@ -364,7 +412,9 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut Module<impl Backend>, cx: &mu
data_ctx.set_segment_section("", &*section_name); data_ctx.set_segment_section("", &*section_name);
} }
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec(); let bytes = alloc
.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
.to_vec();
data_ctx.define(bytes.into_boxed_slice()); data_ctx.define(bytes.into_boxed_slice());
for &(offset, (_tag, reloc)) in alloc.relocations().iter() { for &(offset, (_tag, reloc)) in alloc.relocations().iter() {
@@ -372,7 +422,9 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut Module<impl Backend>, cx: &mu
let endianness = tcx.data_layout.endian; let endianness = tcx.data_layout.endian;
let offset = offset.bytes() as usize; let offset = offset.bytes() as usize;
let ptr_size = tcx.data_layout.pointer_size; let ptr_size = tcx.data_layout.pointer_size;
let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..offset + ptr_size.bytes() as usize); let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
offset..offset + ptr_size.bytes() as usize,
);
read_target_uint(endianness, bytes).unwrap() read_target_uint(endianness, bytes).unwrap()
}; };
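Aside on the relocation hunk above: the bytes at the relocation offset hold a pointer-sized integer in the target's byte order, and read_target_uint decodes it before the relocation target is looked up. A self-contained sketch of that decoding step (fixed 64-bit pointers, no rustc types):

    fn read_ptr_sized(little_endian: bool, bytes: [u8; 8]) -> u64 {
        if little_endian {
            u64::from_le_bytes(bytes)
        } else {
            u64::from_be_bytes(bytes)
        }
    }

    fn main() {
        let bytes = [0x01, 0, 0, 0, 0, 0, 0, 0];
        assert_eq!(read_ptr_sized(true, bytes), 1);
        assert_eq!(read_ptr_sized(false, bytes), 1u64 << 56);
    }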
@@ -390,8 +442,15 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut Module<impl Backend>, cx: &mu
data_id_for_alloc_id(module, reloc, target_alloc.align, target_alloc.mutability) data_id_for_alloc_id(module, reloc, target_alloc.align, target_alloc.mutability)
} }
GlobalAlloc::Static(def_id) => { GlobalAlloc::Static(def_id) => {
if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { if tcx
tcx.sess.fatal(&format!("Allocation {:?} contains reference to TLS value {:?}", alloc, def_id)); .codegen_fn_attrs(def_id)
.flags
.contains(CodegenFnAttrFlags::THREAD_LOCAL)
{
tcx.sess.fatal(&format!(
"Allocation {:?} contains reference to TLS value {:?}",
alloc, def_id
));
} }
// Don't push a `TodoItem::Static` here, as it will cause statics used by // Don't push a `TodoItem::Static` here, as it will cause statics used by
@@ -418,8 +477,9 @@ pub(crate) fn mir_operand_get_const_val<'tcx>(
) -> Option<&'tcx Const<'tcx>> { ) -> Option<&'tcx Const<'tcx>> {
match operand { match operand {
Operand::Copy(_) | Operand::Move(_) => None, Operand::Copy(_) | Operand::Move(_) => None,
Operand::Constant(const_) => { Operand::Constant(const_) => Some(
Some(fx.monomorphize(&const_.literal).eval(fx.tcx, ParamEnv::reveal_all())) fx.monomorphize(&const_.literal)
} .eval(fx.tcx, ParamEnv::reveal_all()),
),
} }
} }


@@ -83,7 +83,9 @@ impl WriterRelocate {
cranelift_module::FuncId::from_u32(sym.try_into().unwrap()), cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
); );
let val = (addr as u64 as i64 + reloc.addend) as u64; let val = (addr as u64 as i64 + reloc.addend) as u64;
self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap(); self.writer
.write_udata_at(reloc.offset as usize, val, reloc.size)
.unwrap();
} }
} }
} }
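Aside on the WriterRelocate hunk above: resolving a debug-section relocation here amounts to symbol address plus addend, written at the recorded offset. A minimal sketch of that patch step over a plain byte buffer (the sketch assumes 8-byte little-endian values; the real writer honors the relocation size and target endianness):

    fn apply_reloc(buf: &mut [u8], offset: usize, sym_addr: u64, addend: i64) {
        let val = (sym_addr as i64 + addend) as u64;
        buf[offset..offset + 8].copy_from_slice(&val.to_le_bytes());
    }

    fn main() {
        let mut buf = [0u8; 16];
        apply_reloc(&mut buf, 4, 0x1000, -16);
        assert_eq!(&buf[4..12], &0x0ff0u64.to_le_bytes());
    }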


@@ -3,13 +3,16 @@ use std::path::{Component, Path};
use crate::prelude::*; use crate::prelude::*;
use rustc_span::{FileName, SourceFile, SourceFileAndLine, Pos, SourceFileHash, SourceFileHashAlgorithm}; use rustc_span::{
FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
};
use cranelift_codegen::binemit::CodeOffset; use cranelift_codegen::binemit::CodeOffset;
use cranelift_codegen::machinst::MachSrcLoc; use cranelift_codegen::machinst::MachSrcLoc;
use gimli::write::{ use gimli::write::{
Address, AttributeValue, FileId, LineProgram, LineString, FileInfo, LineStringTable, UnitEntryId, Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
UnitEntryId,
}; };
// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`. // OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
@@ -18,7 +21,11 @@ fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
let file_name = match iter.next_back() { let file_name = match iter.next_back() {
Some(Component::Normal(p)) => p, Some(Component::Normal(p)) => p,
component => { component => {
panic!("Path component {:?} of path {} is an invalid filename", component, path.display()); panic!(
"Path component {:?} of path {} is an invalid filename",
component,
path.display()
);
} }
}; };
let parent = iter.as_path(); let parent = iter.as_path();
@@ -27,11 +34,13 @@ fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
// OPTIMIZATION: Avoid UTF-8 validation on UNIX. // OPTIMIZATION: Avoid UTF-8 validation on UNIX.
fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] { fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
#[cfg(unix)] { #[cfg(unix)]
{
use std::os::unix::ffi::OsStrExt; use std::os::unix::ffi::OsStrExt;
return path.as_bytes(); return path.as_bytes();
} }
#[cfg(not(unix))] { #[cfg(not(unix))]
{
return path.to_str().unwrap().as_bytes(); return path.to_str().unwrap().as_bytes();
} }
} }
@@ -69,11 +78,7 @@ fn line_program_add_file(
} else { } else {
line_program.default_directory() line_program.default_directory()
}; };
let file_name = LineString::new( let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
file_name,
line_program.encoding(),
line_strings,
);
let info = make_file_info(file.src_hash); let info = make_file_info(file.src_hash);
@@ -149,8 +154,7 @@ impl<'tcx> DebugContext<'tcx> {
// In order to have a good line stepping behavior in debugger, we overwrite debug // In order to have a good line stepping behavior in debugger, we overwrite debug
// locations of macro expansions with that of the outermost expansion site // locations of macro expansions with that of the outermost expansion site
// (unless the crate is being compiled with `-Z debug-macros`). // (unless the crate is being compiled with `-Z debug-macros`).
let span = if !span.from_expansion() || let span = if !span.from_expansion() || tcx.sess.opts.debugging_opts.debug_macros {
tcx.sess.opts.debugging_opts.debug_macros {
span span
} else { } else {
// Walk up the macro expansion chain until we reach a non-expanded span. // Walk up the macro expansion chain until we reach a non-expanded span.
@@ -163,9 +167,13 @@ impl<'tcx> DebugContext<'tcx> {
Ok(SourceFileAndLine { sf: file, line }) => { Ok(SourceFileAndLine { sf: file, line }) => {
let line_pos = file.line_begin_pos(span.lo()); let line_pos = file.line_begin_pos(span.lo());
(file, u64::try_from(line).unwrap() + 1, u64::from((span.lo() - line_pos).to_u32()) + 1) (
file,
u64::try_from(line).unwrap() + 1,
u64::from((span.lo() - line_pos).to_u32()) + 1,
)
} }
Err(file) => (file, 0, 0) Err(file) => (file, 0, 0),
}; };
// line_program_add_file is very slow. // line_program_add_file is very slow.
@@ -188,10 +196,7 @@ impl<'tcx> DebugContext<'tcx> {
line_program.generate_row(); line_program.generate_row();
}; };
line_program.begin_sequence(Some(Address::Symbol { line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
symbol,
addend: 0,
}));
let mut func_end = 0; let mut func_end = 0;
@@ -220,7 +225,8 @@ impl<'tcx> DebugContext<'tcx> {
let srcloc = func.srclocs[inst]; let srcloc = func.srclocs[inst];
line_program.row().address_offset = u64::from(offset); line_program.row().address_offset = u64::from(offset);
if !srcloc.is_default() { if !srcloc.is_default() {
let source_info = *source_info_set.get_index(srcloc.bits() as usize).unwrap(); let source_info =
*source_info_set.get_index(srcloc.bits() as usize).unwrap();
create_row_for_span(line_program, source_info.span); create_row_for_span(line_program, source_info.span);
} else { } else {
create_row_for_span(line_program, function_span); create_row_for_span(line_program, function_span);
@@ -236,12 +242,12 @@ impl<'tcx> DebugContext<'tcx> {
let entry = self.dwarf.unit.get_mut(entry_id); let entry = self.dwarf.unit.get_mut(entry_id);
entry.set( entry.set(
gimli::DW_AT_low_pc, gimli::DW_AT_low_pc,
AttributeValue::Address(Address::Symbol { AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
symbol, );
addend: 0, entry.set(
}), gimli::DW_AT_high_pc,
AttributeValue::Udata(u64::from(func_end)),
); );
entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
self.emit_location(entry_id, function_span); self.emit_location(entry_id, function_span);


@@ -10,8 +10,8 @@ use cranelift_codegen::isa::TargetIsa;
use cranelift_codegen::ValueLocRange; use cranelift_codegen::ValueLocRange;
use gimli::write::{ use gimli::write::{
Address, AttributeValue, DwarfUnit, Expression, LineProgram, Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
LineString, Location, LocationList, Range, RangeList, UnitEntryId, LocationList, Range, RangeList, UnitEntryId,
}; };
use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64}; use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
@@ -66,7 +66,7 @@ impl<'tcx> DebugContext<'tcx> {
Some(path) => { Some(path) => {
let name = path.to_string_lossy().into_owned(); let name = path.to_string_lossy().into_owned();
(name, None) (name, None)
}, }
None => (tcx.crate_name(LOCAL_CRATE).to_string(), None), None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
}; };
@@ -141,7 +141,10 @@ impl<'tcx> DebugContext<'tcx> {
}; };
let type_entry = self.dwarf.unit.get_mut(type_id); let type_entry = self.dwarf.unit.get_mut(type_id);
type_entry.set(gimli::DW_AT_name, AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes())); type_entry.set(
gimli::DW_AT_name,
AttributeValue::String(format!("{}", ty).replace('i', "u").into_bytes()),
);
type_entry.set( type_entry.set(
gimli::DW_AT_byte_size, gimli::DW_AT_byte_size,
AttributeValue::Udata(u64::from(ty.bytes())), AttributeValue::Udata(u64::from(ty.bytes())),
@@ -202,18 +205,29 @@ impl<'tcx> DebugContext<'tcx> {
for (field_idx, field_def) in variant.fields.iter().enumerate() { for (field_idx, field_def) in variant.fields.iter().enumerate() {
let field_offset = layout.fields.offset(field_idx); let field_offset = layout.fields.offset(field_idx);
let field_layout = layout.field(&layout::LayoutCx { let field_layout = layout
tcx: self.tcx, .field(
param_env: ParamEnv::reveal_all(), &layout::LayoutCx {
}, field_idx).unwrap(); tcx: self.tcx,
param_env: ParamEnv::reveal_all(),
},
field_idx,
)
.unwrap();
let field_type = self.dwarf_ty(field_layout.ty); let field_type = self.dwarf_ty(field_layout.ty);
let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member); let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
let field_entry = self.dwarf.unit.get_mut(field_id); let field_entry = self.dwarf.unit.get_mut(field_id);
field_entry.set(gimli::DW_AT_name, AttributeValue::String(field_def.ident.as_str().to_string().into_bytes())); field_entry.set(
field_entry.set(gimli::DW_AT_data_member_location, AttributeValue::Udata(field_offset.bytes())); gimli::DW_AT_name,
AttributeValue::String(field_def.ident.as_str().to_string().into_bytes()),
);
field_entry.set(
gimli::DW_AT_data_member_location,
AttributeValue::Udata(field_offset.bytes()),
);
field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type)); field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
} }
@@ -238,10 +252,7 @@ impl<'tcx> DebugContext<'tcx> {
fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId { fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
let dw_ty = self.dwarf_ty(ty); let dw_ty = self.dwarf_ty(ty);
let var_id = self let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
.dwarf
.unit
.add(scope, gimli::DW_TAG_variable);
let var_entry = self.dwarf.unit.get_mut(var_id); let var_entry = self.dwarf.unit.get_mut(var_id);
var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes())); var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
@@ -266,34 +277,23 @@ impl<'tcx> DebugContext<'tcx> {
// FIXME: add to appropriate scope instead of root // FIXME: add to appropriate scope instead of root
let scope = self.dwarf.unit.root(); let scope = self.dwarf.unit.root();
let entry_id = self let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
.dwarf
.unit
.add(scope, gimli::DW_TAG_subprogram);
let entry = self.dwarf.unit.get_mut(entry_id); let entry = self.dwarf.unit.get_mut(entry_id);
let name_id = self.dwarf.strings.add(name); let name_id = self.dwarf.strings.add(name);
// Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped. // Gdb requires DW_AT_name. Otherwise the DW_TAG_subprogram is skipped.
entry.set( entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
gimli::DW_AT_name,
AttributeValue::StringRef(name_id),
);
entry.set( entry.set(
gimli::DW_AT_linkage_name, gimli::DW_AT_linkage_name,
AttributeValue::StringRef(name_id), AttributeValue::StringRef(name_id),
); );
let end = self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set); let end =
self.create_debug_lines(isa, symbol, entry_id, context, mir.span, source_info_set);
self self.unit_range_list.0.push(Range::StartLength {
.unit_range_list begin: Address::Symbol { symbol, addend: 0 },
.0 length: u64::from(end),
.push(Range::StartLength { });
begin: Address::Symbol {
symbol,
addend: 0,
},
length: u64::from(end),
});
if isa.get_mach_backend().is_some() { if isa.get_mach_backend().is_some() {
return; // Not yet implemented for the AArch64 backend. return; // Not yet implemented for the AArch64 backend.
@@ -301,29 +301,49 @@ impl<'tcx> DebugContext<'tcx> {
let func_entry = self.dwarf.unit.get_mut(entry_id); let func_entry = self.dwarf.unit.get_mut(entry_id);
// Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped. // Gdb requires both DW_AT_low_pc and DW_AT_high_pc. Otherwise the DW_TAG_subprogram is skipped.
func_entry.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Symbol { func_entry.set(
symbol, gimli::DW_AT_low_pc,
addend: 0, AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
})); );
// Using Udata for DW_AT_high_pc requires at least DWARF4 // Using Udata for DW_AT_high_pc requires at least DWARF4
func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end))); func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
// FIXME Remove once actual debuginfo for locals works. // FIXME Remove once actual debuginfo for locals works.
for (i, (param, &val)) in context.func.signature.params.iter().zip(context.func.dfg.block_params(context.func.layout.entry_block().unwrap())).enumerate() { for (i, (param, &val)) in context
.func
.signature
.params
.iter()
.zip(
context
.func
.dfg
.block_params(context.func.layout.entry_block().unwrap()),
)
.enumerate()
{
use cranelift_codegen::ir::ArgumentPurpose; use cranelift_codegen::ir::ArgumentPurpose;
let base_name = match param.purpose { let base_name = match param.purpose {
ArgumentPurpose::Normal => "arg", ArgumentPurpose::Normal => "arg",
ArgumentPurpose::StructArgument(_) => "struct_arg", ArgumentPurpose::StructArgument(_) => "struct_arg",
ArgumentPurpose::StructReturn => "sret", ArgumentPurpose::StructReturn => "sret",
ArgumentPurpose::Link | ArgumentPurpose::FramePointer | ArgumentPurpose::CalleeSaved => continue, ArgumentPurpose::Link
ArgumentPurpose::VMContext | ArgumentPurpose::SignatureId | ArgumentPurpose::StackLimit => unreachable!(), | ArgumentPurpose::FramePointer
| ArgumentPurpose::CalleeSaved => continue,
ArgumentPurpose::VMContext
| ArgumentPurpose::SignatureId
| ArgumentPurpose::StackLimit => unreachable!(),
}; };
let name = format!("{}{}", base_name, i); let name = format!("{}{}", base_name, i);
let dw_ty = self.dwarf_ty_for_clif_ty(param.value_type); let dw_ty = self.dwarf_ty_for_clif_ty(param.value_type);
let loc = translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap(); let loc =
translate_loc(isa, context.func.locations[val], &context.func.stack_slots).unwrap();
let arg_id = self.dwarf.unit.add(entry_id, gimli::DW_TAG_formal_parameter); let arg_id = self
.dwarf
.unit
.add(entry_id, gimli::DW_TAG_formal_parameter);
let var_entry = self.dwarf.unit.get_mut(arg_id); let var_entry = self.dwarf.unit.get_mut(arg_id);
var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes())); var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
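Aside on the parameter loop above: until real local debuginfo exists, each Cranelift signature parameter gets a synthetic name built from its purpose plus its index, and purposes that are not source-level arguments are skipped. A sketch of that naming scheme with a stand-in enum (Purpose here is illustrative, not cranelift_codegen's ArgumentPurpose):

    enum Purpose {
        Normal,
        StructReturn,
        FramePointer,
    }

    fn param_names(purposes: &[Purpose]) -> Vec<String> {
        purposes
            .iter()
            .enumerate()
            .filter_map(|(i, p)| {
                let base = match p {
                    Purpose::Normal => "arg",
                    Purpose::StructReturn => "sret",
                    // skipped, like Link/FramePointer/CalleeSaved in the real loop
                    Purpose::FramePointer => return None,
                };
                Some(format!("{}{}", base, i))
            })
            .collect()
    }

    fn main() {
        let names = param_names(&[
            Purpose::StructReturn,
            Purpose::Normal,
            Purpose::FramePointer,
            Purpose::Normal,
        ]);
        assert_eq!(names, ["sret0", "arg1", "arg3"]);
    }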
@@ -371,8 +391,10 @@ fn place_location<'tcx>(
symbol: usize, symbol: usize,
context: &Context, context: &Context,
local_map: &FxHashMap<mir::Local, CPlace<'tcx>>, local_map: &FxHashMap<mir::Local, CPlace<'tcx>>,
#[allow(rustc::default_hash_types)] #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
value_labels_ranges: &std::collections::HashMap<ValueLabel, Vec<ValueLocRange>>, ValueLabel,
Vec<ValueLocRange>,
>,
place: Place<'tcx>, place: Place<'tcx>,
) -> AttributeValue { ) -> AttributeValue {
assert!(place.projection.is_empty()); // FIXME implement them assert!(place.projection.is_empty()); // FIXME implement them
@@ -393,7 +415,12 @@ fn place_location<'tcx>(
symbol, symbol,
addend: i64::from(value_loc_range.end), addend: i64::from(value_loc_range.end),
}, },
data: translate_loc(isa, value_loc_range.loc, &context.func.stack_slots).unwrap(), data: translate_loc(
isa,
value_loc_range.loc,
&context.func.stack_slots,
)
.unwrap(),
}) })
.collect(), .collect(),
); );
@@ -428,7 +455,11 @@ fn place_location<'tcx>(
} }
// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137 // Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
fn translate_loc(isa: &dyn TargetIsa, loc: ValueLoc, stack_slots: &StackSlots) -> Option<Expression> { fn translate_loc(
isa: &dyn TargetIsa,
loc: ValueLoc,
stack_slots: &StackSlots,
) -> Option<Expression> {
match loc { match loc {
ValueLoc::Reg(reg) => { ValueLoc::Reg(reg) => {
let machine_reg = isa.map_dwarf_register(reg).unwrap(); let machine_reg = isa.map_dwarf_register(reg).unwrap();


@@ -1,6 +1,6 @@
use crate::prelude::*; use crate::prelude::*;
use cranelift_codegen::isa::{TargetIsa, unwind::UnwindInfo}; use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
use gimli::write::{Address, CieId, EhFrame, FrameTable, Section}; use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
@@ -13,13 +13,9 @@ pub(crate) struct UnwindContext<'tcx> {
} }
impl<'tcx> UnwindContext<'tcx> { impl<'tcx> UnwindContext<'tcx> {
pub(crate) fn new( pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
tcx: TyCtxt<'tcx>,
isa: &dyn TargetIsa,
) -> Self {
let mut frame_table = FrameTable::default(); let mut frame_table = FrameTable::default();
let cie_id = if let Some(cie) = isa.create_systemv_cie() { let cie_id = if let Some(cie) = isa.create_systemv_cie() {
Some(frame_table.add_cie(cie)) Some(frame_table.add_cie(cie))
} else { } else {
@@ -42,11 +38,14 @@ impl<'tcx> UnwindContext<'tcx> {
match unwind_info { match unwind_info {
UnwindInfo::SystemV(unwind_info) => { UnwindInfo::SystemV(unwind_info) => {
self.frame_table.add_fde(self.cie_id.unwrap(), unwind_info.to_fde(Address::Symbol { self.frame_table.add_fde(
symbol: func_id.as_u32() as usize, self.cie_id.unwrap(),
addend: 0, unwind_info.to_fde(Address::Symbol {
})); symbol: func_id.as_u32() as usize,
}, addend: 0,
}),
);
}
UnwindInfo::WindowsX64(_) => { UnwindInfo::WindowsX64(_) => {
// FIXME implement this // FIXME implement this
} }
@@ -54,7 +53,9 @@ impl<'tcx> UnwindContext<'tcx> {
} }
pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) { pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx))); let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
self.tcx,
)));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap(); self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if !eh_frame.0.writer.slice().is_empty() { if !eh_frame.0.writer.slice().is_empty() {
@@ -74,7 +75,9 @@ impl<'tcx> UnwindContext<'tcx> {
self, self,
jit_module: &mut Module<cranelift_simplejit::SimpleJITBackend>, jit_module: &mut Module<cranelift_simplejit::SimpleJITBackend>,
) -> Option<UnwindRegistry> { ) -> Option<UnwindRegistry> {
let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.tcx))); let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(
self.tcx,
)));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap(); self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if eh_frame.0.writer.slice().is_empty() { if eh_frame.0.writer.slice().is_empty() {


@@ -1,6 +1,6 @@
//! Adapted from https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs //! Adapted from https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs
use rustc_target::abi::{TagEncoding, Int, Variants}; use rustc_target::abi::{Int, TagEncoding, Variants};
use crate::prelude::*; use crate::prelude::*;


@@ -1,13 +1,13 @@
use std::path::PathBuf; use std::path::PathBuf;
use rustc_codegen_ssa::back::linker::LinkerInfo;
use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
use rustc_middle::middle::cstore::EncodedMetadata; use rustc_middle::middle::cstore::EncodedMetadata;
use rustc_middle::mir::mono::CodegenUnit; use rustc_middle::mir::mono::CodegenUnit;
use rustc_session::config::{DebugInfo, OutputType};
use rustc_session::cgu_reuse_tracker::CguReuse; use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_codegen_ssa::back::linker::LinkerInfo; use rustc_session::config::{DebugInfo, OutputType};
use rustc_codegen_ssa::{CrateInfo, CodegenResults, CompiledModule, ModuleKind};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use crate::prelude::*; use crate::prelude::*;
@@ -21,7 +21,6 @@ fn new_module(tcx: TyCtxt<'_>, name: String) -> Module<crate::backend::Backend>
struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>); struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
impl<HCX> HashStable<HCX> for ModuleCodegenResult { impl<HCX> HashStable<HCX> for ModuleCodegenResult {
fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) { fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
// do nothing // do nothing
@@ -37,7 +36,8 @@ fn emit_module<B: Backend>(
unwind_context: UnwindContext<'_>, unwind_context: UnwindContext<'_>,
map_product: impl FnOnce(B::Product) -> B::Product, map_product: impl FnOnce(B::Product) -> B::Product,
) -> ModuleCodegenResult ) -> ModuleCodegenResult
where B::Product: AddConstructor + Emit + WriteDebugInfo, where
B::Product: AddConstructor + Emit + WriteDebugInfo,
{ {
module.finalize_definitions(); module.finalize_definitions();
let mut product = module.finish(); let mut product = module.finish();
@@ -55,7 +55,8 @@ fn emit_module<B: Backend>(
.temp_path(OutputType::Object, Some(&name)); .temp_path(OutputType::Object, Some(&name));
let obj = product.emit(); let obj = product.emit();
if let Err(err) = std::fs::write(&tmp_file, obj) { if let Err(err) = std::fs::write(&tmp_file, obj) {
tcx.sess.fatal(&format!("error writing object file: {}", err)); tcx.sess
.fatal(&format!("error writing object file: {}", err));
} }
let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() { let work_product = if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() {
@@ -88,7 +89,9 @@ fn reuse_workproduct_for_cgu(
let mut object = None; let mut object = None;
let work_product = cgu.work_product(tcx); let work_product = cgu.work_product(tcx);
if let Some(saved_file) = &work_product.saved_file { if let Some(saved_file) = &work_product.saved_file {
let obj_out = tcx.output_filenames(LOCAL_CRATE).temp_path(OutputType::Object, Some(&cgu.name().as_str())); let obj_out = tcx
.output_filenames(LOCAL_CRATE)
.temp_path(OutputType::Object, Some(&cgu.name().as_str()));
object = Some(obj_out.clone()); object = Some(obj_out.clone());
let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file); let source_file = rustc_incremental::in_incr_comp_dir(&incr_comp_session_dir, &saved_file);
if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) { if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
@@ -120,22 +123,30 @@ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodege
// Initialize the global atomic mutex using a constructor for proc-macros. // Initialize the global atomic mutex using a constructor for proc-macros.
// FIXME implement atomic instructions in Cranelift. // FIXME implement atomic instructions in Cranelift.
let mut init_atomics_mutex_from_constructor = None; let mut init_atomics_mutex_from_constructor = None;
if tcx.sess.crate_types().contains(&rustc_session::config::CrateType::ProcMacro) { if tcx
if mono_items.iter().any(|(mono_item, _)| { .sess
match mono_item { .crate_types()
rustc_middle::mir::mono::MonoItem::Static(def_id) => { .contains(&rustc_session::config::CrateType::ProcMacro)
tcx.symbol_name(Instance::mono(tcx, *def_id)).name.contains("__rustc_proc_macro_decls_") {
} if mono_items.iter().any(|(mono_item, _)| match mono_item {
_ => false, rustc_middle::mir::mono::MonoItem::Static(def_id) => tcx
} .symbol_name(Instance::mono(tcx, *def_id))
.name
.contains("__rustc_proc_macro_decls_"),
_ => false,
}) { }) {
init_atomics_mutex_from_constructor = Some(crate::atomic_shim::init_global_lock_constructor(&mut module, &format!("{}_init_atomics_mutex", cgu_name.as_str()))); init_atomics_mutex_from_constructor =
Some(crate::atomic_shim::init_global_lock_constructor(
&mut module,
&format!("{}_init_atomics_mutex", cgu_name.as_str()),
));
} }
} }
let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None); let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None);
super::codegen_mono_items(&mut cx, mono_items); super::codegen_mono_items(&mut cx, mono_items);
let (mut module, global_asm, debug, mut unwind_context) = tcx.sess.time("finalize CodegenCx", || cx.finalize()); let (mut module, global_asm, debug, mut unwind_context) =
tcx.sess.time("finalize CodegenCx", || cx.finalize());
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context); crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context);
let codegen_result = emit_module( let codegen_result = emit_module(
@@ -151,7 +162,7 @@ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodege
} }
product product
} },
); );
codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm); codegen_global_asm(tcx, &cgu.name().as_str(), &global_asm);
@@ -181,40 +192,46 @@ pub(super) fn run_aot(
} }
let modules = super::time(tcx, "codegen mono items", || { let modules = super::time(tcx, "codegen mono items", || {
cgus.iter().map(|cgu| { cgus.iter()
let cgu_reuse = determine_cgu_reuse(tcx, cgu); .map(|cgu| {
tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); let cgu_reuse = determine_cgu_reuse(tcx, cgu);
tcx.sess
.cgu_reuse_tracker
.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
match cgu_reuse { match cgu_reuse {
_ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {} _ if std::env::var("CG_CLIF_INCR_CACHE_DISABLED").is_ok() => {}
CguReuse::No => {} CguReuse::No => {}
CguReuse::PreLto => { CguReuse::PreLto => {
return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products); return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
}
CguReuse::PostLto => unreachable!(),
} }
CguReuse::PostLto => unreachable!(),
}
let dep_node = cgu.codegen_dep_node(tcx); let dep_node = cgu.codegen_dep_node(tcx);
let (ModuleCodegenResult(module, work_product), _) = let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
tcx.dep_graph.with_task(dep_node, tcx, cgu.name(), module_codegen, rustc_middle::dep_graph::hash_result); dep_node,
tcx,
cgu.name(),
module_codegen,
rustc_middle::dep_graph::hash_result,
);
if let Some((id, product)) = work_product { if let Some((id, product)) = work_product {
work_products.insert(id, product); work_products.insert(id, product);
} }
module module
}).collect::<Vec<_>>() })
.collect::<Vec<_>>()
}); });
tcx.sess.abort_if_errors(); tcx.sess.abort_if_errors();
let mut allocator_module = new_module(tcx, "allocator_shim".to_string()); let mut allocator_module = new_module(tcx, "allocator_shim".to_string());
let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa()); let mut allocator_unwind_context = UnwindContext::new(tcx, allocator_module.isa());
let created_alloc_shim = crate::allocator::codegen( let created_alloc_shim =
tcx, crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
&mut allocator_module,
&mut allocator_unwind_context,
);
let allocator_module = if created_alloc_shim { let allocator_module = if created_alloc_shim {
let ModuleCodegenResult(module, work_product) = emit_module( let ModuleCodegenResult(module, work_product) = emit_module(
@@ -257,7 +274,8 @@ pub(super) fn run_aot(
}); });
if let Err(err) = std::fs::write(&tmp_file, obj) { if let Err(err) = std::fs::write(&tmp_file, obj) {
tcx.sess.fatal(&format!("error writing metadata object file: {}", err)); tcx.sess
.fatal(&format!("error writing metadata object file: {}", err));
} }
(metadata_cgu_name, tmp_file) (metadata_cgu_name, tmp_file)
@@ -277,17 +295,20 @@ pub(super) fn run_aot(
rustc_incremental::assert_module_sources::assert_module_sources(tcx); rustc_incremental::assert_module_sources::assert_module_sources(tcx);
} }
Box::new((CodegenResults { Box::new((
crate_name: tcx.crate_name(LOCAL_CRATE), CodegenResults {
modules, crate_name: tcx.crate_name(LOCAL_CRATE),
allocator_module, modules,
metadata_module, allocator_module,
crate_hash: tcx.crate_hash(LOCAL_CRATE), metadata_module,
metadata, crate_hash: tcx.crate_hash(LOCAL_CRATE),
windows_subsystem: None, // Windows is not yet supported metadata,
linker_info: LinkerInfo::new(tcx), windows_subsystem: None, // Windows is not yet supported
crate_info: CrateInfo::new(tcx), linker_info: LinkerInfo::new(tcx),
}, work_products)) crate_info: CrateInfo::new(tcx),
},
work_products,
))
} }
fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) { fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
@@ -308,9 +329,12 @@ fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
// FIXME fix linker error on macOS // FIXME fix linker error on macOS
if cfg!(not(feature = "inline_asm")) { if cfg!(not(feature = "inline_asm")) {
tcx.sess.fatal("asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift"); tcx.sess.fatal(
"asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
);
} else { } else {
tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows"); tcx.sess
.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
} }
} }
@@ -318,13 +342,17 @@ fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld"); let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
// Remove all LLVM style comments // Remove all LLVM style comments
let global_asm = global_asm.lines().map(|line| { let global_asm = global_asm
if let Some(index) = line.find("//") { .lines()
&line[0..index] .map(|line| {
} else { if let Some(index) = line.find("//") {
line &line[0..index]
} } else {
}).collect::<Vec<_>>().join("\n"); line
}
})
.collect::<Vec<_>>()
.join("\n");
let output_object_file = tcx let output_object_file = tcx
.output_filenames(LOCAL_CRATE) .output_filenames(LOCAL_CRATE)
@@ -333,14 +361,21 @@ fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
// Assemble `global_asm` // Assemble `global_asm`
let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm"); let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
let mut child = Command::new(assembler) let mut child = Command::new(assembler)
.arg("-o").arg(&global_asm_object_file) .arg("-o")
.arg(&global_asm_object_file)
.stdin(Stdio::piped()) .stdin(Stdio::piped())
.spawn() .spawn()
.expect("Failed to spawn `as`."); .expect("Failed to spawn `as`.");
child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap(); child
.stdin
.take()
.unwrap()
.write_all(global_asm.as_bytes())
.unwrap();
let status = child.wait().expect("Failed to wait for `as`."); let status = child.wait().expect("Failed to wait for `as`.");
if !status.success() { if !status.success() {
tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm)); tcx.sess
.fatal(&format!("Failed to assemble `{}`", global_asm));
} }
// Link the global asm and main object file together // Link the global asm and main object file together
@@ -348,7 +383,8 @@ fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
std::fs::rename(&output_object_file, &main_object_file).unwrap(); std::fs::rename(&output_object_file, &main_object_file).unwrap();
let status = Command::new(linker) let status = Command::new(linker)
.arg("-r") // Create a new object file .arg("-r") // Create a new object file
.arg("-o").arg(output_object_file) .arg("-o")
.arg(output_object_file)
.arg(&main_object_file) .arg(&main_object_file)
.arg(&global_asm_object_file) .arg(&global_asm_object_file)
.status() .status()
@@ -383,7 +419,11 @@ fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguR
} }
let work_product_id = &cgu.work_product_id(); let work_product_id = &cgu.work_product_id();
if tcx.dep_graph.previous_work_product(work_product_id).is_none() { if tcx
.dep_graph
.previous_work_product(work_product_id)
.is_none()
{
// We don't have anything cached for this CGU. This can happen // We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session. // if the CGU did not exist in the previous session.
return CguReuse::No; return CguReuse::No;
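Aside on the codegen_global_asm hunks earlier in this file: before piping the collected module-level asm to the external assembler, every line is truncated at the first // to drop LLVM-style comments. The same transform in isolation:

    fn strip_llvm_comments(asm: &str) -> String {
        asm.lines()
            .map(|line| line.find("//").map_or(line, |idx| &line[..idx]))
            .collect::<Vec<_>>()
            .join("\n")
    }

    fn main() {
        let asm = ".globl foo // exported\nfoo:\n    ret // done";
        assert_eq!(strip_llvm_comments(asm), ".globl foo \nfoo:\n    ret ");
    }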


@@ -11,9 +11,18 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
// Rustc opens us without the RTLD_GLOBAL flag, so __cg_clif_global_atomic_mutex will not be // Rustc opens us without the RTLD_GLOBAL flag, so __cg_clif_global_atomic_mutex will not be
// exported. We fix this by opening ourself again as global. // exported. We fix this by opening ourself again as global.
// FIXME remove once atomic_shim is gone // FIXME remove once atomic_shim is gone
let cg_dylib = std::ffi::OsString::from(&tcx.sess.opts.debugging_opts.codegen_backend.as_ref().unwrap()); let cg_dylib = std::ffi::OsString::from(
std::mem::forget(libloading::os::unix::Library::open(Some(cg_dylib), libc::RTLD_NOW | libc::RTLD_GLOBAL).unwrap()); &tcx.sess
.opts
.debugging_opts
.codegen_backend
.as_ref()
.unwrap(),
);
std::mem::forget(
libloading::os::unix::Library::open(Some(cg_dylib), libc::RTLD_NOW | libc::RTLD_GLOBAL)
.unwrap(),
);
let imported_symbols = load_imported_symbols_for_jit(tcx); let imported_symbols = load_imported_symbols_for_jit(tcx);
@@ -54,10 +63,11 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
let mut cx = crate::CodegenCx::new(tcx, jit_module, false); let mut cx = crate::CodegenCx::new(tcx, jit_module, false);
let (mut jit_module, global_asm, _debug, mut unwind_context) = super::time(tcx, "codegen mono items", || { let (mut jit_module, global_asm, _debug, mut unwind_context) =
super::codegen_mono_items(&mut cx, mono_items); super::time(tcx, "codegen mono items", || {
tcx.sess.time("finalize CodegenCx", || cx.finalize()) super::codegen_mono_items(&mut cx, mono_items);
}); tcx.sess.time("finalize CodegenCx", || cx.finalize())
});
if !global_asm.is_empty() { if !global_asm.is_empty() {
tcx.sess.fatal("Global asm is not supported in JIT mode"); tcx.sess.fatal("Global asm is not supported in JIT mode");
} }
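Aside (not part of the commit): the comment in the hunk above describes re-opening the backend dylib with RTLD_GLOBAL so that __cg_clif_global_atomic_mutex becomes resolvable from JITed code. Below is a minimal plain-Rust sketch of that trick; the function name and path handling are invented for illustration, and it assumes a libloading release like the one used here, where os::unix::Library::open is a safe function (newer libloading versions mark it unsafe).

use std::ffi::OsString;
use std::path::Path;

// Sketch: re-open an already-loaded dylib with RTLD_GLOBAL so the symbols it
// exports can be resolved by code that is JIT-compiled later in this process.
fn promote_dylib_to_global(dylib_path: &Path) {
    let name = OsString::from(dylib_path);
    // RTLD_NOW resolves symbols eagerly; RTLD_GLOBAL makes them visible to
    // objects loaded afterwards and to later symbol lookups.
    let lib = libloading::os::unix::Library::open(Some(name), libc::RTLD_NOW | libc::RTLD_GLOBAL)
        .expect("failed to re-open the codegen backend dylib");
    // Deliberately leak the handle: the library has to stay mapped for the
    // rest of the process lifetime, just as the JIT driver above does with
    // std::mem::forget.
    std::mem::forget(lib);
}

In the JIT driver this runs once at startup, before any JITed code touches the shimmed atomics.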


@ -17,13 +17,17 @@ pub(crate) fn codegen_crate(
tcx.sess.abort_if_errors(); tcx.sess.abort_if_errors();
if std::env::var("CG_CLIF_JIT").is_ok() if std::env::var("CG_CLIF_JIT").is_ok()
&& tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) && tcx
.sess
.crate_types()
.contains(&rustc_session::config::CrateType::Executable)
{ {
#[cfg(feature = "jit")] #[cfg(feature = "jit")]
let _: ! = jit::run_jit(tcx); let _: ! = jit::run_jit(tcx);
#[cfg(not(feature = "jit"))] #[cfg(not(feature = "jit"))]
tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift"); tcx.sess
.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
} }
aot::run_aot(tcx, metadata, need_metadata_module) aot::run_aot(tcx, metadata, need_metadata_module)
@ -37,8 +41,12 @@ fn codegen_mono_items<'tcx>(
for &(mono_item, (linkage, visibility)) in &mono_items { for &(mono_item, (linkage, visibility)) in &mono_items {
match mono_item { match mono_item {
MonoItem::Fn(instance) => { MonoItem::Fn(instance) => {
let (name, sig) = let (name, sig) = get_function_name_and_sig(
get_function_name_and_sig(cx.tcx, cx.module.isa().triple(), instance, false); cx.tcx,
cx.module.isa().triple(),
instance,
false,
);
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility); let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
cx.module.declare_function(&name, linkage, &sig).unwrap(); cx.module.declare_function(&name, linkage, &sig).unwrap();
} }
@ -85,7 +93,8 @@ fn trans_mono_item<'tcx, B: Backend + 'static>(
} }
}); });
tcx.sess.time("codegen fn", || crate::base::trans_fn(cx, inst, linkage)); tcx.sess
.time("codegen fn", || crate::base::trans_fn(cx, inst, linkage));
} }
MonoItem::Static(def_id) => { MonoItem::Static(def_id) => {
crate::constant::codegen_static(&mut cx.constants_cx, def_id); crate::constant::codegen_static(&mut cx.constants_cx, def_id);
@ -103,12 +112,21 @@ fn trans_mono_item<'tcx, B: Backend + 'static>(
} }
fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R { fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
if std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_ref().map(|val| &**val) == Ok("1") { if std::env::var("CG_CLIF_DISPLAY_CG_TIME")
.as_ref()
.map(|val| &**val)
== Ok("1")
{
println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name); println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
let before = std::time::Instant::now(); let before = std::time::Instant::now();
let res = tcx.sess.time(name, f); let res = tcx.sess.time(name, f);
let after = std::time::Instant::now(); let after = std::time::Instant::now();
println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before); println!(
"[{:<30}: {}] end time: {:?}",
tcx.crate_name(LOCAL_CRATE),
name,
after - before
);
res res
} else { } else {
tcx.sess.time(name, f) tcx.sess.time(name, f)


@ -2,7 +2,7 @@ use crate::prelude::*;
use std::fmt::Write; use std::fmt::Write;
use rustc_ast::ast::{InlineAsmTemplatePiece, InlineAsmOptions}; use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand; use rustc_middle::mir::InlineAsmOperand;
use rustc_target::asm::*; use rustc_target::asm::*;
@ -45,21 +45,46 @@ pub(crate) fn codegen_inline_asm<'tcx>(
InlineAsmOperand::In { reg, ref value } => { InlineAsmOperand::In { reg, ref value } => {
let reg = expect_reg(reg); let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class()))); clobbered_regs.push((reg, new_slot(reg.reg_class())));
inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, value).load_scalar(fx))); inputs.push((
reg,
new_slot(reg.reg_class()),
crate::base::trans_operand(fx, value).load_scalar(fx),
));
} }
InlineAsmOperand::Out { reg, late: _, place } => { InlineAsmOperand::Out {
reg,
late: _,
place,
} => {
let reg = expect_reg(reg); let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class()))); clobbered_regs.push((reg, new_slot(reg.reg_class())));
if let Some(place) = place { if let Some(place) = place {
outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, place))); outputs.push((
reg,
new_slot(reg.reg_class()),
crate::base::trans_place(fx, place),
));
} }
} }
InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => { InlineAsmOperand::InOut {
reg,
late: _,
ref in_value,
out_place,
} => {
let reg = expect_reg(reg); let reg = expect_reg(reg);
clobbered_regs.push((reg, new_slot(reg.reg_class()))); clobbered_regs.push((reg, new_slot(reg.reg_class())));
inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, in_value).load_scalar(fx))); inputs.push((
reg,
new_slot(reg.reg_class()),
crate::base::trans_operand(fx, in_value).load_scalar(fx),
));
if let Some(out_place) = out_place { if let Some(out_place) = out_place {
outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, out_place))); outputs.push((
reg,
new_slot(reg.reg_class()),
crate::base::trans_place(fx, out_place),
));
} }
} }
InlineAsmOperand::Const { value: _ } => todo!(), InlineAsmOperand::Const { value: _ } => todo!(),
@ -70,9 +95,21 @@ pub(crate) fn codegen_inline_asm<'tcx>(
let inline_asm_index = fx.inline_asm_index; let inline_asm_index = fx.inline_asm_index;
fx.inline_asm_index += 1; fx.inline_asm_index += 1;
let asm_name = format!("{}__inline_asm_{}", fx.tcx.symbol_name(fx.instance).name, inline_asm_index); let asm_name = format!(
"{}__inline_asm_{}",
fx.tcx.symbol_name(fx.instance).name,
inline_asm_index
);
let generated_asm = generate_asm_wrapper(&asm_name, InlineAsmArch::X86_64, options, template, clobbered_regs, &inputs, &outputs); let generated_asm = generate_asm_wrapper(
&asm_name,
InlineAsmArch::X86_64,
options,
template,
clobbered_regs,
&inputs,
&outputs,
);
fx.cx.global_asm.push_str(&generated_asm); fx.cx.global_asm.push_str(&generated_asm);
call_inline_asm(fx, &asm_name, slot_size, inputs, outputs); call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
@ -90,7 +127,12 @@ fn generate_asm_wrapper(
let mut generated_asm = String::new(); let mut generated_asm = String::new();
writeln!(generated_asm, ".globl {}", asm_name).unwrap(); writeln!(generated_asm, ".globl {}", asm_name).unwrap();
writeln!(generated_asm, ".type {},@function", asm_name).unwrap(); writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap(); writeln!(
generated_asm,
".section .text.{},\"ax\",@progbits",
asm_name
)
.unwrap();
writeln!(generated_asm, "{}:", asm_name).unwrap(); writeln!(generated_asm, "{}:", asm_name).unwrap();
generated_asm.push_str(".intel_syntax noprefix\n"); generated_asm.push_str(".intel_syntax noprefix\n");
@ -120,7 +162,11 @@ fn generate_asm_wrapper(
InlineAsmTemplatePiece::String(s) => { InlineAsmTemplatePiece::String(s) => {
generated_asm.push_str(s); generated_asm.push_str(s);
} }
InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(), InlineAsmTemplatePiece::Placeholder {
operand_idx: _,
modifier: _,
span: _,
} => todo!(),
} }
} }
generated_asm.push('\n'); generated_asm.push('\n');
@ -147,7 +193,7 @@ fn generate_asm_wrapper(
} }
generated_asm.push_str(".att_syntax\n"); generated_asm.push_str(".att_syntax\n");
writeln!(generated_asm, ".size {name}, .-{name}", name=asm_name).unwrap(); writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
generated_asm.push_str(".text\n"); generated_asm.push_str(".text\n");
generated_asm.push_str("\n\n"); generated_asm.push_str("\n\n");
@ -169,17 +215,30 @@ fn call_inline_asm<'tcx>(
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
fx.add_comment(stack_slot, "inline asm scratch slot"); fx.add_comment(stack_slot, "inline asm scratch slot");
let inline_asm_func = fx.cx.module.declare_function(asm_name, Linkage::Import, &Signature { let inline_asm_func = fx
call_conv: CallConv::SystemV, .cx
params: vec![AbiParam::new(fx.pointer_type)], .module
returns: vec![], .declare_function(
}).unwrap(); asm_name,
let inline_asm_func = fx.cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func); Linkage::Import,
&Signature {
call_conv: CallConv::SystemV,
params: vec![AbiParam::new(fx.pointer_type)],
returns: vec![],
},
)
.unwrap();
let inline_asm_func = fx
.cx
.module
.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
fx.add_comment(inline_asm_func, asm_name); fx.add_comment(inline_asm_func, asm_name);
for (_reg, offset, value) in inputs { for (_reg, offset, value) in inputs {
fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap()); fx.bcx
.ins()
.stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
} }
let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0); let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
@ -187,7 +246,10 @@ fn call_inline_asm<'tcx>(
for (_reg, offset, place) in outputs { for (_reg, offset, place) in outputs {
let ty = fx.clif_type(place.layout().ty).unwrap(); let ty = fx.clif_type(place.layout().ty).unwrap();
let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap()); let value = fx
.bcx
.ins()
.stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
place.write_cvalue(fx, CValue::by_val(value, place.layout())); place.write_cvalue(fx, CValue::by_val(value, place.layout()));
} }
} }
@ -203,18 +265,25 @@ fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsm
match arch { match arch {
InlineAsmArch::X86_64 => { InlineAsmArch::X86_64 => {
write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap(); write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap(); reg.emit(generated_asm, InlineAsmArch::X86_64, None)
.unwrap();
generated_asm.push('\n'); generated_asm.push('\n');
} }
_ => unimplemented!("save_register for {:?}", arch), _ => unimplemented!("save_register for {:?}", arch),
} }
} }
fn restore_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) { fn restore_register(
generated_asm: &mut String,
arch: InlineAsmArch,
reg: InlineAsmReg,
offset: Size,
) {
match arch { match arch {
InlineAsmArch::X86_64 => { InlineAsmArch::X86_64 => {
generated_asm.push_str(" mov "); generated_asm.push_str(" mov ");
reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap(); reg.emit(generated_asm, InlineAsmArch::X86_64, None)
.unwrap();
writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap(); writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
} }
_ => unimplemented!("restore_register for {:?}", arch), _ => unimplemented!("restore_register for {:?}", arch),

View File

@ -29,37 +29,60 @@ pub(crate) fn codegen_cpuid_call<'tcx>(
fx.bcx.switch_to_block(leaf_0); fx.bcx.switch_to_block(leaf_0);
let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1); let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu"))); let vend0 = fx
let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI"))); .bcx
let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel"))); .ins()
fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]); .iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
let vend2 = fx
.bcx
.ins()
.iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
let vend1 = fx
.bcx
.ins()
.iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
fx.bcx
.ins()
.jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
fx.bcx.switch_to_block(leaf_1); fx.bcx.switch_to_block(leaf_1);
let cpu_signature = fx.bcx.ins().iconst(types::I32, 0); let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
let additional_information = fx.bcx.ins().iconst(types::I32, 0); let additional_information = fx.bcx.ins().iconst(types::I32, 0);
let ecx_features = fx.bcx.ins().iconst( let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
types::I32, let edx_features = fx
0, .bcx
.ins()
.iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
fx.bcx.ins().jump(
dest,
&[
cpu_signature,
additional_information,
ecx_features,
edx_features,
],
); );
let edx_features = fx.bcx.ins().iconst(
types::I32,
1 << 25 /* sse */ | 1 << 26 /* sse2 */,
);
fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
fx.bcx.switch_to_block(leaf_8000_0000); fx.bcx.switch_to_block(leaf_8000_0000);
let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0); let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
let zero = fx.bcx.ins().iconst(types::I32, 0); let zero = fx.bcx.ins().iconst(types::I32, 0);
fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]); fx.bcx
.ins()
.jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
fx.bcx.switch_to_block(leaf_8000_0001); fx.bcx.switch_to_block(leaf_8000_0001);
let zero = fx.bcx.ins().iconst(types::I32, 0); let zero = fx.bcx.ins().iconst(types::I32, 0);
let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0); let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0); let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]); fx.bcx
.ins()
.jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
fx.bcx.switch_to_block(unsupported_leaf); fx.bcx.switch_to_block(unsupported_leaf);
crate::trap::trap_unreachable(fx, "__cpuid_count arch intrinsic doesn't yet support specified leaf"); crate::trap::trap_unreachable(
fx,
"__cpuid_count arch intrinsic doesn't yet support specified leaf",
);
fx.bcx.switch_to_block(dest); fx.bcx.switch_to_block(dest);
fx.bcx.ins().nop(); fx.bcx.ins().nop();
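Aside (not part of the commit): the constants this cpuid shim hard-codes are easier to read when decoded in plain Rust. The standalone snippet below is only a worked example of those values: leaf 0 advertises the vendor string "GenuineIntel" split across ebx, edx and ecx as little-endian u32s, and leaf 1 reports just SSE and SSE2 in the edx feature bits.

// Decode the hard-coded cpuid values used above.
fn main() {
    // Leaf 0: ebx/edx/ecx carry the vendor string, 4 bytes each, little endian.
    let ebx = u32::from_le_bytes(*b"Genu");
    let edx = u32::from_le_bytes(*b"ineI");
    let ecx = u32::from_le_bytes(*b"ntel");

    // Real CPUID consumers reassemble the string in ebx, edx, ecx order.
    let mut vendor = Vec::new();
    vendor.extend_from_slice(&ebx.to_le_bytes());
    vendor.extend_from_slice(&edx.to_le_bytes());
    vendor.extend_from_slice(&ecx.to_le_bytes());
    assert_eq!(std::str::from_utf8(&vendor).unwrap(), "GenuineIntel");

    // Leaf 1: edx bit 25 = SSE, bit 26 = SSE2; everything else is reported
    // as unsupported.
    let edx_features: u32 = 1 << 25 | 1 << 26;
    assert_eq!(edx_features & (1 << 25), 1 << 25); // sse
    assert_eq!(edx_features & (1 << 26), 1 << 26); // sse2
    assert_eq!(edx_features & (1 << 23), 0); // e.g. mmx is not advertised
}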


@ -108,7 +108,7 @@ macro call_intrinsic_match {
} }
} }
macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) { macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
crate::atomic_shim::lock_global_lock($fx); crate::atomic_shim::lock_global_lock($fx);
let clif_ty = $fx.clif_type($T).unwrap(); let clif_ty = $fx.clif_type($T).unwrap();
@ -144,7 +144,13 @@ macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
match $ty.kind { match $ty.kind {
ty::Uint(_) | ty::Int(_) => {} ty::Uint(_) | ty::Int(_) => {}
_ => { _ => {
$fx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty)); $fx.tcx.sess.span_err(
$span,
&format!(
"`{}` intrinsic: expected basic integer type, found `{:?}`",
$intrinsic, $ty
),
);
// Prevent verifier error // Prevent verifier error
crate::trap::trap_unreachable($fx, "compilation should not have succeeded"); crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
return; return;
@ -170,10 +176,15 @@ fn lane_type_and_count<'tcx>(
rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(), rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(),
_ => unreachable!("lane_type_and_count({:?})", layout), _ => unreachable!("lane_type_and_count({:?})", layout),
}; };
let lane_layout = layout.field(&ty::layout::LayoutCx { let lane_layout = layout
tcx, .field(
param_env: ParamEnv::reveal_all(), &ty::layout::LayoutCx {
}, 0).unwrap(); tcx,
param_env: ParamEnv::reveal_all(),
},
0,
)
.unwrap();
(lane_layout, lane_count) (lane_layout, lane_count)
} }
@ -405,10 +416,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
trap_unreachable(fx, "[corruption] Called intrinsic::unreachable."); trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
} }
"transmute" => { "transmute" => {
trap_unreachable( trap_unreachable(fx, "[corruption] Transmuting to uninhabited type.");
fx,
"[corruption] Transmuting to uninhabited type.",
);
} }
_ => unimplemented!("unsupported instrinsic {}", intrinsic), _ => unimplemented!("unsupported instrinsic {}", intrinsic),
} }


@@ -1,5 +1,5 @@
-use crate::prelude::*;
 use super::*;
+use crate::prelude::*;

 pub(super) fn codegen_simd_intrinsic_call<'tcx>(
     fx: &mut FunctionCx<'_, 'tcx, impl Backend>,


@@ -1,4 +1,11 @@
-#![feature(rustc_private, decl_macro, type_alias_impl_trait, associated_type_bounds, never_type, try_blocks)]
+#![feature(
+    rustc_private,
+    decl_macro,
+    type_alias_impl_trait,
+    associated_type_bounds,
+    never_type,
+    try_blocks
+)]
 #![warn(rust_2018_idioms)]
 #![warn(unused_lifetimes)]
@ -7,6 +14,7 @@ extern crate flate2;
extern crate libc; extern crate libc;
#[macro_use] #[macro_use]
extern crate rustc_middle; extern crate rustc_middle;
extern crate rustc_ast;
extern crate rustc_codegen_ssa; extern crate rustc_codegen_ssa;
extern crate rustc_data_structures; extern crate rustc_data_structures;
extern crate rustc_errors; extern crate rustc_errors;
@ -19,7 +27,6 @@ extern crate rustc_session;
extern crate rustc_span; extern crate rustc_span;
extern crate rustc_symbol_mangling; extern crate rustc_symbol_mangling;
extern crate rustc_target; extern crate rustc_target;
extern crate rustc_ast;
// This prevents duplicating functions and statics that are already part of the host rustc process. // This prevents duplicating functions and statics that are already part of the host rustc process.
#[allow(unused_extern_crates)] #[allow(unused_extern_crates)]
@ -27,14 +34,14 @@ extern crate rustc_driver;
use std::any::Any; use std::any::Any;
use rustc_codegen_ssa::traits::CodegenBackend;
use rustc_codegen_ssa::CodegenResults;
use rustc_errors::ErrorReported; use rustc_errors::ErrorReported;
use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId}; use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId};
use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader}; use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc_session::Session;
use rustc_session::config::OutputFilenames;
use rustc_middle::ty::query::Providers; use rustc_middle::ty::query::Providers;
use rustc_codegen_ssa::CodegenResults; use rustc_session::config::OutputFilenames;
use rustc_codegen_ssa::traits::CodegenBackend; use rustc_session::Session;
use cranelift_codegen::settings::{self, Configurable}; use cranelift_codegen::settings::{self, Configurable};
@ -46,8 +53,8 @@ mod allocator;
mod analyze; mod analyze;
mod archive; mod archive;
mod atomic_shim; mod atomic_shim;
mod base;
mod backend; mod backend;
mod base;
mod cast; mod cast;
mod codegen_i128; mod codegen_i128;
mod common; mod common;
@ -77,26 +84,29 @@ mod prelude {
pub(crate) use rustc_ast::ast::{FloatTy, IntTy, UintTy}; pub(crate) use rustc_ast::ast::{FloatTy, IntTy, UintTy};
pub(crate) use rustc_span::Span; pub(crate) use rustc_span::Span;
pub(crate) use rustc_middle::bug;
pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE}; pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
pub(crate) use rustc_middle::bug;
pub(crate) use rustc_middle::mir::{self, *}; pub(crate) use rustc_middle::mir::{self, *};
pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout}; pub(crate) use rustc_middle::ty::layout::{self, TyAndLayout};
pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
pub(crate) use rustc_middle::ty::{ pub(crate) use rustc_middle::ty::{
self, FnSig, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeAndMut, TypeFoldable, self, FnSig, Instance, InstanceDef, ParamEnv, Ty, TyCtxt, TypeAndMut, TypeFoldable,
}; };
pub(crate) use rustc_target::abi::{Abi, LayoutOf, Scalar, Size, VariantIdx};
pub(crate) use rustc_data_structures::fx::FxHashMap; pub(crate) use rustc_data_structures::fx::FxHashMap;
pub(crate) use rustc_index::vec::Idx; pub(crate) use rustc_index::vec::Idx;
pub(crate) use cranelift_codegen::Context;
pub(crate) use cranelift_codegen::entity::EntitySet; pub(crate) use cranelift_codegen::entity::EntitySet;
pub(crate) use cranelift_codegen::ir::{AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc, StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value};
pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC}; pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
pub(crate) use cranelift_codegen::ir::function::Function; pub(crate) use cranelift_codegen::ir::function::Function;
pub(crate) use cranelift_codegen::ir::types; pub(crate) use cranelift_codegen::ir::types;
pub(crate) use cranelift_codegen::ir::{
AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
};
pub(crate) use cranelift_codegen::isa::{self, CallConv}; pub(crate) use cranelift_codegen::isa::{self, CallConv};
pub(crate) use cranelift_codegen::Context;
pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable}; pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
pub(crate) use cranelift_module::{ pub(crate) use cranelift_module::{
self, Backend, DataContext, DataId, FuncId, Linkage, Module, self, Backend, DataContext, DataId, FuncId, Linkage, Module,
@ -133,17 +143,10 @@ struct CodegenCx<'tcx, B: Backend + 'static> {
} }
impl<'tcx, B: Backend + 'static> CodegenCx<'tcx, B> { impl<'tcx, B: Backend + 'static> CodegenCx<'tcx, B> {
fn new( fn new(tcx: TyCtxt<'tcx>, module: Module<B>, debug_info: bool) -> Self {
tcx: TyCtxt<'tcx>,
module: Module<B>,
debug_info: bool,
) -> Self {
let unwind_context = UnwindContext::new(tcx, module.isa()); let unwind_context = UnwindContext::new(tcx, module.isa());
let debug_context = if debug_info { let debug_context = if debug_info {
Some(DebugContext::new( Some(DebugContext::new(tcx, module.isa()))
tcx,
module.isa(),
))
} else { } else {
None None
}; };
@ -159,9 +162,21 @@ impl<'tcx, B: Backend + 'static> CodegenCx<'tcx, B> {
} }
} }
fn finalize(mut self) -> (Module<B>, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) { fn finalize(
mut self,
) -> (
Module<B>,
String,
Option<DebugContext<'tcx>>,
UnwindContext<'tcx>,
) {
self.constants_cx.finalize(self.tcx, &mut self.module); self.constants_cx.finalize(self.tcx, &mut self.module);
(self.module, self.global_asm, self.debug_context, self.unwind_context) (
self.module,
self.global_asm,
self.debug_context,
self.unwind_context,
)
} }
} }
@ -220,7 +235,9 @@ impl CodegenBackend for CraneliftCodegenBackend {
sess: &Session, sess: &Session,
dep_graph: &DepGraph, dep_graph: &DepGraph,
) -> Result<Box<dyn Any>, ErrorReported> { ) -> Result<Box<dyn Any>, ErrorReported> {
let (codegen_results, work_products) = *ongoing_codegen.downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>().unwrap(); let (codegen_results, work_products) = *ongoing_codegen
.downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
.unwrap();
sess.time("serialize_work_products", move || { sess.time("serialize_work_products", move || {
rustc_incremental::save_work_product_index(sess, &dep_graph, work_products) rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)


@ -2,7 +2,11 @@ use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
use crate::prelude::*; use crate::prelude::*;
pub(crate) fn get_clif_linkage(mono_item: MonoItem<'_>, linkage: RLinkage, visibility: Visibility) -> Linkage { pub(crate) fn get_clif_linkage(
mono_item: MonoItem<'_>,
linkage: RLinkage,
visibility: Visibility,
) -> Linkage {
match (linkage, visibility) { match (linkage, visibility) {
(RLinkage::External, Visibility::Default) => Linkage::Export, (RLinkage::External, Visibility::Default) => Linkage::Export,
(RLinkage::Internal, Visibility::Default) => Linkage::Local, (RLinkage::Internal, Visibility::Default) => Linkage::Local,


@ -26,7 +26,13 @@ pub(crate) fn maybe_create_entry_wrapper(
return; return;
} }
create_entry_fn(tcx, module, unwind_context, main_def_id, use_start_lang_item); create_entry_fn(
tcx,
module,
unwind_context,
main_def_id,
use_start_lang_item,
);
fn create_entry_fn( fn create_entry_fn(
tcx: TyCtxt<'_>, tcx: TyCtxt<'_>,
@ -114,7 +120,8 @@ pub(crate) fn maybe_create_entry_wrapper(
cmain_func_id, cmain_func_id,
&mut ctx, &mut ctx,
&mut cranelift_codegen::binemit::NullTrapSink {}, &mut cranelift_codegen::binemit::NullTrapSink {},
).unwrap(); )
.unwrap();
unwind_context.add_function(cmain_func_id, &ctx, m.isa()); unwind_context.add_function(cmain_func_id, &ctx, m.isa());
} }
} }


@ -2,12 +2,12 @@ use std::convert::TryFrom;
use std::fs::File; use std::fs::File;
use std::path::Path; use std::path::Path;
use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc_session::config;
use rustc_middle::ty::TyCtxt;
use rustc_codegen_ssa::METADATA_FILENAME; use rustc_codegen_ssa::METADATA_FILENAME;
use rustc_data_structures::owning_ref::{self, OwningRef}; use rustc_data_structures::owning_ref::{self, OwningRef};
use rustc_data_structures::rustc_erase_owner; use rustc_data_structures::rustc_erase_owner;
use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoader};
use rustc_middle::ty::TyCtxt;
use rustc_session::config;
use rustc_target::spec::Target; use rustc_target::spec::Target;
use crate::backend::WriteMetadata; use crate::backend::WriteMetadata;
@ -27,7 +27,7 @@ impl MetadataLoader for CraneliftMetadataLoader {
if entry.header().identifier() == METADATA_FILENAME.as_bytes() { if entry.header().identifier() == METADATA_FILENAME.as_bytes() {
let mut buf = Vec::with_capacity( let mut buf = Vec::with_capacity(
usize::try_from(entry.header().size()) usize::try_from(entry.header().size())
.expect("Rlib metadata file too big to load into memory.") .expect("Rlib metadata file too big to load into memory."),
); );
::std::io::copy(&mut entry, &mut buf).map_err(|e| format!("{:?}", e))?; ::std::io::copy(&mut entry, &mut buf).map_err(|e| format!("{:?}", e))?;
let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf).into(); let buf: OwningRef<Vec<u8>, [u8]> = OwningRef::new(buf).into();
@ -59,7 +59,10 @@ impl MetadataLoader for CraneliftMetadataLoader {
} }
// Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112 // Adapted from https://github.com/rust-lang/rust/blob/da573206f87b5510de4b0ee1a9c044127e409bd3/src/librustc_codegen_llvm/base.rs#L47-L112
pub(crate) fn write_metadata<P: WriteMetadata>(tcx: TyCtxt<'_>, product: &mut P) -> EncodedMetadata { pub(crate) fn write_metadata<P: WriteMetadata>(
tcx: TyCtxt<'_>,
product: &mut P,
) -> EncodedMetadata {
use flate2::write::DeflateEncoder; use flate2::write::DeflateEncoder;
use flate2::Compression; use flate2::Compression;
use std::io::Write; use std::io::Write;


@ -253,7 +253,11 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs); let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs); let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs); let val = fx.bcx.ins().imul(lhs, rhs);
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, (1 << ty.bits()) - 1); let has_overflow = fx.bcx.ins().icmp_imm(
IntCC::UnsignedGreaterThan,
val,
(1 << ty.bits()) - 1,
);
let val = fx.bcx.ins().ireduce(ty, val); let val = fx.bcx.ins().ireduce(ty, val);
(val, has_overflow) (val, has_overflow)
} }
@ -261,8 +265,15 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs); let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs); let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
let val = fx.bcx.ins().imul(lhs, rhs); let val = fx.bcx.ins().imul(lhs, rhs);
let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1))); let has_underflow =
let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, (1 << (ty.bits() - 1)) - 1); fx.bcx
.ins()
.icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
let has_overflow = fx.bcx.ins().icmp_imm(
IntCC::SignedGreaterThan,
val,
(1 << (ty.bits() - 1)) - 1,
);
let val = fx.bcx.ins().ireduce(ty, val); let val = fx.bcx.ins().ireduce(ty, val);
(val, fx.bcx.ins().bor(has_underflow, has_overflow)) (val, fx.bcx.ins().bor(has_underflow, has_overflow))
} }
@ -275,12 +286,18 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
} else { } else {
let val_hi = fx.bcx.ins().smulhi(lhs, rhs); let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
let not_all_zero = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0); let not_all_zero = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0);
let not_all_ones = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64); let not_all_ones = fx.bcx.ins().icmp_imm(
IntCC::NotEqual,
val_hi,
u64::try_from((1u128 << ty.bits()) - 1).unwrap() as i64,
);
fx.bcx.ins().band(not_all_zero, not_all_ones) fx.bcx.ins().band(not_all_zero, not_all_ones)
}; };
(val, has_overflow) (val, has_overflow)
} }
types::I128 => unreachable!("i128 should have been handled by codegen_i128::maybe_codegen"), types::I128 => {
unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
}
_ => unreachable!("invalid non-integer type {}", ty), _ => unreachable!("invalid non-integer type {}", ty),
} }
} }
@ -291,8 +308,10 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
let val = fx.bcx.ins().ishl(lhs, actual_shift); let val = fx.bcx.ins().ishl(lhs, actual_shift);
let ty = fx.bcx.func.dfg.value_type(val); let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1; let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = let has_overflow = fx
fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift); .bcx
.ins()
.icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow) (val, has_overflow)
} }
BinOp::Shr => { BinOp::Shr => {
@ -306,8 +325,10 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
}; };
let ty = fx.bcx.func.dfg.value_type(val); let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1; let max_shift = i64::from(ty.bits()) - 1;
let has_overflow = let has_overflow = fx
fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift); .bcx
.ins()
.icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow) (val, has_overflow)
} }
_ => bug!( _ => bug!(
@ -323,7 +344,10 @@ pub(crate) fn trans_checked_int_binop<'tcx>(
// FIXME directly write to result place instead // FIXME directly write to result place instead
let out_place = CPlace::new_stack_slot( let out_place = CPlace::new_stack_slot(
fx, fx,
fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter())), fx.layout_of(
fx.tcx
.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
),
); );
let out_layout = out_place.layout(); let out_layout = out_place.layout();
out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout)); out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
@ -382,9 +406,12 @@ pub(crate) fn trans_ptr_binop<'tcx>(
in_lhs: CValue<'tcx>, in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>, in_rhs: CValue<'tcx>,
) -> CValue<'tcx> { ) -> CValue<'tcx> {
let is_thin_ptr = in_lhs.layout().ty.builtin_deref(true).map(|TypeAndMut { ty, mutbl: _}| { let is_thin_ptr = in_lhs
!has_ptr_meta(fx.tcx, ty) .layout()
}).unwrap_or(true); .ty
.builtin_deref(true)
.map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
.unwrap_or(true);
if is_thin_ptr { if is_thin_ptr {
match bin_op { match bin_op {

View File

@ -15,7 +15,10 @@ pub(super) fn optimize_function(ctx: &mut Context, cold_blocks: &EntitySet<Block
// bytecodealliance/cranelift#1339 is implemented. // bytecodealliance/cranelift#1339 is implemented.
let mut block_insts = FxHashMap::default(); let mut block_insts = FxHashMap::default();
for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) { for block in cold_blocks
.keys()
.filter(|&block| cold_blocks.contains(block))
{
let insts = ctx.func.layout.block_insts(block).collect::<Vec<_>>(); let insts = ctx.func.layout.block_insts(block).collect::<Vec<_>>();
for &inst in &insts { for &inst in &insts {
ctx.func.layout.remove_inst(inst); ctx.func.layout.remove_inst(inst);
@ -25,7 +28,10 @@ pub(super) fn optimize_function(ctx: &mut Context, cold_blocks: &EntitySet<Block
} }
// And then append them at the back again. // And then append them at the back again.
for block in cold_blocks.keys().filter(|&block| cold_blocks.contains(block)) { for block in cold_blocks
.keys()
.filter(|&block| cold_blocks.contains(block))
{
ctx.func.layout.append_block(block); ctx.func.layout.append_block(block);
for inst in block_insts.remove(&block).unwrap() { for inst in block_insts.remove(&block).unwrap() {
ctx.func.layout.append_inst(inst, block); ctx.func.layout.append_inst(inst, block);


@ -5,8 +5,7 @@ mod stack2reg;
pub(crate) fn optimize_function<'tcx>( pub(crate) fn optimize_function<'tcx>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
#[cfg_attr(not(debug_assertions), allow(unused_variables))] #[cfg_attr(not(debug_assertions), allow(unused_variables))] instance: Instance<'tcx>,
instance: Instance<'tcx>,
ctx: &mut Context, ctx: &mut Context,
cold_blocks: &EntitySet<Block>, cold_blocks: &EntitySet<Block>,
clif_comments: &mut crate::pretty_clif::CommentWriter, clif_comments: &mut crate::pretty_clif::CommentWriter,


@ -16,8 +16,8 @@ use std::ops::Not;
use rustc_data_structures::fx::{FxHashSet, FxHasher}; use rustc_data_structures::fx::{FxHashSet, FxHasher};
use cranelift_codegen::cursor::{Cursor, FuncCursor}; use cranelift_codegen::cursor::{Cursor, FuncCursor};
use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
use cranelift_codegen::ir::immediates::Offset32; use cranelift_codegen::ir::immediates::Offset32;
use cranelift_codegen::ir::{InstructionData, Opcode, ValueDef};
use hashbrown::HashSet; use hashbrown::HashSet;
use std::hash::BuildHasherDefault; use std::hash::BuildHasherDefault;
@ -55,31 +55,41 @@ struct StackSlotUsage {
impl StackSlotUsage { impl StackSlotUsage {
fn potential_stores_for_load(&self, ctx: &Context, load: Inst) -> Vec<Inst> { fn potential_stores_for_load(&self, ctx: &Context, load: Inst) -> Vec<Inst> {
self.stack_store.iter().cloned().filter(|&store| { self.stack_store
match spatial_overlap(&ctx.func, store, load) { .iter()
SpatialOverlap::No => false, // Can never be the source of the loaded value. .cloned()
SpatialOverlap::Partial | SpatialOverlap::Full => true, .filter(|&store| {
} match spatial_overlap(&ctx.func, store, load) {
}).filter(|&store| { SpatialOverlap::No => false, // Can never be the source of the loaded value.
match temporal_order(ctx, store, load) { SpatialOverlap::Partial | SpatialOverlap::Full => true,
TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value. }
TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true, })
} .filter(|&store| {
}).collect::<Vec<Inst>>() match temporal_order(ctx, store, load) {
TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
}
})
.collect::<Vec<Inst>>()
} }
fn potential_loads_of_store(&self, ctx: &Context, store: Inst) -> Vec<Inst> { fn potential_loads_of_store(&self, ctx: &Context, store: Inst) -> Vec<Inst> {
self.stack_load.iter().cloned().filter(|&load| { self.stack_load
match spatial_overlap(&ctx.func, store, load) { .iter()
SpatialOverlap::No => false, // Can never be the source of the loaded value. .cloned()
SpatialOverlap::Partial | SpatialOverlap::Full => true, .filter(|&load| {
} match spatial_overlap(&ctx.func, store, load) {
}).filter(|&load| { SpatialOverlap::No => false, // Can never be the source of the loaded value.
match temporal_order(ctx, store, load) { SpatialOverlap::Partial | SpatialOverlap::Full => true,
TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value. }
TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true, })
} .filter(|&load| {
}).collect::<Vec<Inst>>() match temporal_order(ctx, store, load) {
TemporalOrder::NeverBefore => false, // Can never be the source of the loaded value.
TemporalOrder::MaybeBefore | TemporalOrder::DefinitivelyBefore => true,
}
})
.collect::<Vec<Inst>>()
} }
fn remove_unused_stack_addr(func: &mut Function, inst: Inst) { fn remove_unused_stack_addr(func: &mut Function, inst: Inst) {
@ -134,14 +144,22 @@ impl<'a> OptimizeContext<'a> {
stack_slot, stack_slot,
offset: _, offset: _,
} => { } => {
stack_slot_usage_map.entry(OrdStackSlot(stack_slot)).or_insert_with(StackSlotUsage::default).stack_addr.insert(inst); stack_slot_usage_map
.entry(OrdStackSlot(stack_slot))
.or_insert_with(StackSlotUsage::default)
.stack_addr
.insert(inst);
} }
InstructionData::StackLoad { InstructionData::StackLoad {
opcode: Opcode::StackLoad, opcode: Opcode::StackLoad,
stack_slot, stack_slot,
offset: _, offset: _,
} => { } => {
stack_slot_usage_map.entry(OrdStackSlot(stack_slot)).or_insert_with(StackSlotUsage::default).stack_load.insert(inst); stack_slot_usage_map
.entry(OrdStackSlot(stack_slot))
.or_insert_with(StackSlotUsage::default)
.stack_load
.insert(inst);
} }
InstructionData::StackStore { InstructionData::StackStore {
opcode: Opcode::StackStore, opcode: Opcode::StackStore,
@ -149,7 +167,11 @@ impl<'a> OptimizeContext<'a> {
stack_slot, stack_slot,
offset: _, offset: _,
} => { } => {
stack_slot_usage_map.entry(OrdStackSlot(stack_slot)).or_insert_with(StackSlotUsage::default).stack_store.insert(inst); stack_slot_usage_map
.entry(OrdStackSlot(stack_slot))
.or_insert_with(StackSlotUsage::default)
.stack_store
.insert(inst);
} }
_ => {} _ => {}
} }
@ -165,7 +187,6 @@ impl<'a> OptimizeContext<'a> {
pub(super) fn optimize_function( pub(super) fn optimize_function(
ctx: &mut Context, ctx: &mut Context,
#[cfg_attr(not(debug_assertions), allow(unused_variables))]
clif_comments: &mut crate::pretty_clif::CommentWriter, clif_comments: &mut crate::pretty_clif::CommentWriter,
) { ) {
combine_stack_addr_with_load_store(&mut ctx.func); combine_stack_addr_with_load_store(&mut ctx.func);
@ -176,7 +197,8 @@ pub(super) fn optimize_function(
remove_unused_stack_addr_and_stack_load(&mut opt_ctx); remove_unused_stack_addr_and_stack_load(&mut opt_ctx);
#[cfg(debug_assertions)] { #[cfg(debug_assertions)]
{
for (&OrdStackSlot(stack_slot), usage) in &opt_ctx.stack_slot_usage_map { for (&OrdStackSlot(stack_slot), usage) in &opt_ctx.stack_slot_usage_map {
clif_comments.add_comment(stack_slot, format!("used by: {:?}", usage)); clif_comments.add_comment(stack_slot, format!("used by: {:?}", usage));
} }
@ -194,13 +216,16 @@ pub(super) fn optimize_function(
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
for &store in &potential_stores { for &store in &potential_stores {
clif_comments.add_comment(load, format!( clif_comments.add_comment(
"Potential store -> load forwarding {} -> {} ({:?}, {:?})", load,
opt_ctx.ctx.func.dfg.display_inst(store, None), format!(
opt_ctx.ctx.func.dfg.display_inst(load, None), "Potential store -> load forwarding {} -> {} ({:?}, {:?})",
spatial_overlap(&opt_ctx.ctx.func, store, load), opt_ctx.ctx.func.dfg.display_inst(store, None),
temporal_order(&opt_ctx.ctx, store, load), opt_ctx.ctx.func.dfg.display_inst(load, None),
)); spatial_overlap(&opt_ctx.ctx.func, store, load),
temporal_order(&opt_ctx.ctx, store, load),
),
);
} }
match *potential_stores { match *potential_stores {
@ -208,12 +233,17 @@ pub(super) fn optimize_function(
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
clif_comments.add_comment(load, format!("[BUG?] Reading uninitialized memory")); clif_comments.add_comment(load, format!("[BUG?] Reading uninitialized memory"));
} }
[store] if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full && temporal_order(&opt_ctx.ctx, store, load) == TemporalOrder::DefinitivelyBefore => { [store]
if spatial_overlap(&opt_ctx.ctx.func, store, load) == SpatialOverlap::Full
&& temporal_order(&opt_ctx.ctx, store, load)
== TemporalOrder::DefinitivelyBefore =>
{
// Only one store could have been the origin of the value. // Only one store could have been the origin of the value.
let stored_value = opt_ctx.ctx.func.dfg.inst_args(store)[0]; let stored_value = opt_ctx.ctx.func.dfg.inst_args(store)[0];
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
clif_comments.add_comment(load, format!("Store to load forward {} -> {}", store, load)); clif_comments
.add_comment(load, format!("Store to load forward {} -> {}", store, load));
users.change_load_to_alias(&mut opt_ctx.ctx.func, load, stored_value); users.change_load_to_alias(&mut opt_ctx.ctx.func, load, stored_value);
} }
@ -226,13 +256,16 @@ pub(super) fn optimize_function(
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
for &load in &potential_loads { for &load in &potential_loads {
clif_comments.add_comment(store, format!( clif_comments.add_comment(
"Potential load from store {} <- {} ({:?}, {:?})", store,
opt_ctx.ctx.func.dfg.display_inst(load, None), format!(
opt_ctx.ctx.func.dfg.display_inst(store, None), "Potential load from store {} <- {} ({:?}, {:?})",
spatial_overlap(&opt_ctx.ctx.func, store, load), opt_ctx.ctx.func.dfg.display_inst(load, None),
temporal_order(&opt_ctx.ctx, store, load), opt_ctx.ctx.func.dfg.display_inst(store, None),
)); spatial_overlap(&opt_ctx.ctx.func, store, load),
temporal_order(&opt_ctx.ctx, store, load),
),
);
} }
if potential_loads.is_empty() { if potential_loads.is_empty() {
@ -240,7 +273,14 @@ pub(super) fn optimize_function(
// FIXME also remove stores when there is always a next store before a load. // FIXME also remove stores when there is always a next store before a load.
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
clif_comments.add_comment(store, format!("Remove dead stack store {} of {}", opt_ctx.ctx.func.dfg.display_inst(store, None), stack_slot.0)); clif_comments.add_comment(
store,
format!(
"Remove dead stack store {} of {}",
opt_ctx.ctx.func.dfg.display_inst(store, None),
stack_slot.0
),
);
users.remove_dead_store(&mut opt_ctx.ctx.func, store); users.remove_dead_store(&mut opt_ctx.ctx.func, store);
} }
@ -258,24 +298,52 @@ fn combine_stack_addr_with_load_store(func: &mut Function) {
while let Some(_block) = cursor.next_block() { while let Some(_block) = cursor.next_block() {
while let Some(inst) = cursor.next_inst() { while let Some(inst) = cursor.next_inst() {
match cursor.func.dfg[inst] { match cursor.func.dfg[inst] {
InstructionData::Load { opcode: Opcode::Load, arg: addr, flags: _, offset } => { InstructionData::Load {
if cursor.func.dfg.ctrl_typevar(inst) == types::I128 || cursor.func.dfg.ctrl_typevar(inst).is_vector() { opcode: Opcode::Load,
arg: addr,
flags: _,
offset,
} => {
if cursor.func.dfg.ctrl_typevar(inst) == types::I128
|| cursor.func.dfg.ctrl_typevar(inst).is_vector()
{
continue; // WORKAROUD: stack_load.i128 not yet implemented continue; // WORKAROUD: stack_load.i128 not yet implemented
} }
if let Some((stack_slot, stack_addr_offset)) = try_get_stack_slot_and_offset_for_addr(cursor.func, addr) { if let Some((stack_slot, stack_addr_offset)) =
if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into()) { try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
{
if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
{
let ty = cursor.func.dfg.ctrl_typevar(inst); let ty = cursor.func.dfg.ctrl_typevar(inst);
cursor.func.dfg.replace(inst).stack_load(ty, stack_slot, combined_offset); cursor.func.dfg.replace(inst).stack_load(
ty,
stack_slot,
combined_offset,
);
} }
} }
} }
InstructionData::Store { opcode: Opcode::Store, args: [value, addr], flags: _, offset } => { InstructionData::Store {
if cursor.func.dfg.ctrl_typevar(inst) == types::I128 || cursor.func.dfg.ctrl_typevar(inst).is_vector() { opcode: Opcode::Store,
args: [value, addr],
flags: _,
offset,
} => {
if cursor.func.dfg.ctrl_typevar(inst) == types::I128
|| cursor.func.dfg.ctrl_typevar(inst).is_vector()
{
continue; // WORKAROUND: stack_store.i128 not yet implemented continue; // WORKAROUND: stack_store.i128 not yet implemented
} }
if let Some((stack_slot, stack_addr_offset)) = try_get_stack_slot_and_offset_for_addr(cursor.func, addr) { if let Some((stack_slot, stack_addr_offset)) =
if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into()) { try_get_stack_slot_and_offset_for_addr(cursor.func, addr)
cursor.func.dfg.replace(inst).stack_store(value, stack_slot, combined_offset); {
if let Some(combined_offset) = offset.try_add_i64(stack_addr_offset.into())
{
cursor.func.dfg.replace(inst).stack_store(
value,
stack_slot,
combined_offset,
);
} }
} }
} }
@ -296,7 +364,10 @@ fn remove_unused_stack_addr_and_stack_load(opt_ctx: &mut OptimizeContext<'_>) {
if let ValueDef::Result(arg_origin, 0) = cursor.func.dfg.value_def(arg) { if let ValueDef::Result(arg_origin, 0) = cursor.func.dfg.value_def(arg) {
match cursor.func.dfg[arg_origin].opcode() { match cursor.func.dfg[arg_origin].opcode() {
Opcode::StackAddr | Opcode::StackLoad => { Opcode::StackAddr | Opcode::StackLoad => {
stack_addr_load_insts_users.entry(arg_origin).or_insert_with(FxHashSet::default).insert(inst); stack_addr_load_insts_users
.entry(arg_origin)
.or_insert_with(FxHashSet::default)
.insert(inst);
} }
_ => {} _ => {}
} }
@ -309,7 +380,8 @@ fn remove_unused_stack_addr_and_stack_load(opt_ctx: &mut OptimizeContext<'_>) {
for inst in stack_addr_load_insts_users.keys() { for inst in stack_addr_load_insts_users.keys() {
let mut is_recorded_stack_addr_or_stack_load = false; let mut is_recorded_stack_addr_or_stack_load = false;
for stack_slot_users in opt_ctx.stack_slot_usage_map.values() { for stack_slot_users in opt_ctx.stack_slot_usage_map.values() {
is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst) || stack_slot_users.stack_load.contains(inst); is_recorded_stack_addr_or_stack_load |= stack_slot_users.stack_addr.contains(inst)
|| stack_slot_users.stack_load.contains(inst);
} }
assert!(is_recorded_stack_addr_or_stack_load); assert!(is_recorded_stack_addr_or_stack_load);
} }
@ -323,23 +395,37 @@ fn remove_unused_stack_addr_and_stack_load(opt_ctx: &mut OptimizeContext<'_>) {
for stack_slot_users in opt_ctx.stack_slot_usage_map.values_mut() { for stack_slot_users in opt_ctx.stack_slot_usage_map.values_mut() {
stack_slot_users stack_slot_users
.stack_addr .stack_addr
.drain_filter(|inst| !(stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true))) .drain_filter(|inst| {
!(stack_addr_load_insts_users
.get(inst)
.map(|users| users.is_empty())
.unwrap_or(true))
})
.for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst)); .for_each(|inst| StackSlotUsage::remove_unused_stack_addr(&mut func, inst));
stack_slot_users stack_slot_users
.stack_load .stack_load
.drain_filter(|inst| !(stack_addr_load_insts_users.get(inst).map(|users| users.is_empty()).unwrap_or(true))) .drain_filter(|inst| {
!(stack_addr_load_insts_users
.get(inst)
.map(|users| users.is_empty())
.unwrap_or(true))
})
.for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst)); .for_each(|inst| StackSlotUsage::remove_unused_load(&mut func, inst));
} }
} }
fn try_get_stack_slot_and_offset_for_addr(func: &Function, addr: Value) -> Option<(StackSlot, Offset32)> { fn try_get_stack_slot_and_offset_for_addr(
func: &Function,
addr: Value,
) -> Option<(StackSlot, Offset32)> {
if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) { if let ValueDef::Result(addr_inst, 0) = func.dfg.value_def(addr) {
if let InstructionData::StackLoad { if let InstructionData::StackLoad {
opcode: Opcode::StackAddr, opcode: Opcode::StackAddr,
stack_slot, stack_slot,
offset, offset,
} = func.dfg[addr_inst] { } = func.dfg[addr_inst]
{
return Some((stack_slot, offset)); return Some((stack_slot, offset));
} }
} }
@@ -390,7 +476,10 @@ fn spatial_overlap(func: &Function, src: Inst, dest: Inst) -> SpatialOverlap {
     }

     let src_end: i64 = src_offset.try_add_i64(i64::from(src_size)).unwrap().into();
-    let dest_end: i64 = dest_offset.try_add_i64(i64::from(dest_size)).unwrap().into();
+    let dest_end: i64 = dest_offset
+        .try_add_i64(i64::from(dest_size))
+        .unwrap()
+        .into();
     if src_end <= dest_offset.into() || dest_end <= src_offset.into() {
         return SpatialOverlap::No;
     }
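Aside (not part of the commit): a simplified, self-contained model of the spatial-overlap classification this store-to-load forwarding pass relies on. It ignores stack slots and Cranelift types entirely; only the byte-range comparison mirrors the code shown above, and the exact Full/Partial rule used here is an assumption made for the sketch.

// Two stack accesses within the same slot, each described by (offset, size).
#[derive(Debug, PartialEq)]
enum SpatialOverlap {
    No,      // disjoint byte ranges: the store can never feed the load
    Partial, // ranges intersect but differ: forwarding is not safe
    Full,    // identical range: the loaded value is exactly the stored value
}

fn spatial_overlap(src_offset: i64, src_size: i64, dest_offset: i64, dest_size: i64) -> SpatialOverlap {
    if src_offset == dest_offset && src_size == dest_size {
        return SpatialOverlap::Full;
    }
    let src_end = src_offset + src_size;
    let dest_end = dest_offset + dest_size;
    if src_end <= dest_offset || dest_end <= src_offset {
        return SpatialOverlap::No;
    }
    SpatialOverlap::Partial
}

fn main() {
    // Only a Full overlap (combined with a store that definitely happens
    // before the load) allows replacing the load with the stored value.
    assert_eq!(spatial_overlap(0, 8, 0, 8), SpatialOverlap::Full);
    assert_eq!(spatial_overlap(0, 8, 8, 4), SpatialOverlap::No);
    assert_eq!(spatial_overlap(0, 8, 4, 8), SpatialOverlap::Partial);
}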


@ -32,7 +32,10 @@ impl Pointer {
} }
} }
pub(crate) fn const_addr<'a, 'tcx>(fx: &mut FunctionCx<'a, 'tcx, impl Backend>, addr: i64) -> Self { pub(crate) fn const_addr<'a, 'tcx>(
fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
addr: i64,
) -> Self {
let addr = fx.bcx.ins().iconst(fx.pointer_type, addr); let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
Pointer { Pointer {
base: PointerBase::Addr(addr), base: PointerBase::Addr(addr),
@ -62,10 +65,15 @@ impl Pointer {
fx.bcx.ins().iadd_imm(base_addr, offset) fx.bcx.ins().iadd_imm(base_addr, offset)
} }
} }
PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset), PointerBase::Stack(stack_slot) => {
PointerBase::Dangling(align) => { fx.bcx
fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()) .ins()
.stack_addr(fx.pointer_type, stack_slot, self.offset)
} }
PointerBase::Dangling(align) => fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
} }
} }
@ -89,11 +97,16 @@ impl Pointer {
} }
} else { } else {
let base_offset: i64 = self.offset.into(); let base_offset: i64 = self.offset.into();
if let Some(new_offset) = base_offset.checked_add(extra_offset){ if let Some(new_offset) = base_offset.checked_add(extra_offset) {
let base_addr = match self.base { let base_addr = match self.base {
PointerBase::Addr(addr) => addr, PointerBase::Addr(addr) => addr,
PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0), PointerBase::Stack(stack_slot) => {
PointerBase::Dangling(align) => fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()), fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
}
PointerBase::Dangling(align) => fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()),
}; };
let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset); let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
Pointer { Pointer {
@ -101,7 +114,10 @@ impl Pointer {
offset: Offset32::new(0), offset: Offset32::new(0),
} }
} else { } else {
panic!("self.offset ({}) + extra_offset ({}) not representable in i64", base_offset, extra_offset); panic!(
"self.offset ({}) + extra_offset ({}) not representable in i64",
base_offset, extra_offset
);
} }
} }
} }
@ -117,14 +133,20 @@ impl Pointer {
offset: self.offset, offset: self.offset,
}, },
PointerBase::Stack(stack_slot) => { PointerBase::Stack(stack_slot) => {
let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset); let base_addr = fx
.bcx
.ins()
.stack_addr(fx.pointer_type, stack_slot, self.offset);
Pointer { Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)), base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
offset: Offset32::new(0), offset: Offset32::new(0),
} }
} }
PointerBase::Dangling(align) => { PointerBase::Dangling(align) => {
let addr = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap()); let addr = fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
Pointer { Pointer {
base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)), base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
offset: self.offset, offset: self.offset,
@ -141,12 +163,14 @@ impl Pointer {
) -> Value { ) -> Value {
match self.base { match self.base {
PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset), PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
PointerBase::Stack(stack_slot) => if ty == types::I128 || ty.is_vector() { PointerBase::Stack(stack_slot) => {
// WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented if ty == types::I128 || ty.is_vector() {
let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0); // WORKAROUND for stack_load.i128 and stack_load.iXxY not being implemented
fx.bcx.ins().load(ty, flags, base_addr, self.offset) let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
} else { fx.bcx.ins().load(ty, flags, base_addr, self.offset)
fx.bcx.ins().stack_load(ty, stack_slot, self.offset) } else {
fx.bcx.ins().stack_load(ty, stack_slot, self.offset)
}
} }
PointerBase::Dangling(_align) => unreachable!(), PointerBase::Dangling(_align) => unreachable!(),
} }


@ -196,7 +196,7 @@ impl<B: Backend + 'static> FunctionCx<'_, '_, B> {
entity: E, entity: E,
comment: S, comment: S,
) { ) {
self.clif_comments.add_comment(entity, comment); self.clif_comments.add_comment(entity, comment);
} }
} }
@ -210,7 +210,13 @@ pub(crate) fn write_clif_file<'tcx>(
) { ) {
use std::io::Write; use std::io::Write;
if !cfg!(debug_assertions) && !tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly) { if !cfg!(debug_assertions)
&& !tcx
.sess
.opts
.output_types
.contains_key(&OutputType::LlvmAssembly)
{
return; return;
} }


@ -107,8 +107,10 @@ const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("vsx", Some(sym::powerpc_target_feature)), ("vsx", Some(sym::powerpc_target_feature)),
]; ];
const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
&[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))]; ("fp64", Some(sym::mips_target_feature)),
("msa", Some(sym::mips_target_feature)),
];
const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[ const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("m", Some(sym::riscv_target_feature)), ("m", Some(sym::riscv_target_feature)),


@ -7,9 +7,10 @@ use rustc_target::spec::LinkerFlavor;
/// Tries to infer the path of a binary for the target toolchain from the linker name. /// Tries to infer the path of a binary for the target toolchain from the linker name.
pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf { pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
let (mut linker, _linker_flavor) = linker_and_flavor(sess); let (mut linker, _linker_flavor) = linker_and_flavor(sess);
let linker_file_name = linker.file_name().and_then(|name| name.to_str()).unwrap_or_else(|| { let linker_file_name = linker
sess.fatal("couldn't extract file name from specified linker") .file_name()
}); .and_then(|name| name.to_str())
.unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
if linker_file_name == "ld.lld" { if linker_file_name == "ld.lld" {
if tool != "ld" { if tool != "ld" {
@ -68,9 +69,12 @@ fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
flavor, flavor,
)), )),
(Some(linker), None) => { (Some(linker), None) => {
let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { let stem = linker
sess.fatal("couldn't extract file stem from specified linker") .file_stem()
}); .and_then(|stem| stem.to_str())
.unwrap_or_else(|| {
sess.fatal("couldn't extract file stem from specified linker")
});
let flavor = if stem == "emcc" { let flavor = if stem == "emcc" {
LinkerFlavor::Em LinkerFlavor::Em
@@ -99,7 +103,11 @@ fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
// linker and linker flavor specified via command line have precedence over what the target // linker and linker flavor specified via command line have precedence over what the target
// specification specifies // specification specifies
if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) { if let Some(ret) = infer_from(
sess,
sess.opts.cg.linker.clone(),
sess.opts.cg.linker_flavor,
) {
return ret; return ret;
} }


@@ -2,7 +2,8 @@ use crate::prelude::*;
fn codegen_print(fx: &mut FunctionCx<'_, '_, impl cranelift_module::Backend>, msg: &str) { fn codegen_print(fx: &mut FunctionCx<'_, '_, impl cranelift_module::Backend>, msg: &str) {
let puts = fx let puts = fx
.cx.module .cx
.module
.declare_function( .declare_function(
"puts", "puts",
Linkage::Import, Linkage::Import,


@@ -33,7 +33,8 @@ fn codegen_field<'tcx>(
_ => { _ => {
// We have to align the offset for DST's // We have to align the offset for DST's
let unaligned_offset = field_offset.bytes(); let unaligned_offset = field_offset.bytes();
let (_, unsized_align) = crate::unsize::size_and_align_of_dst(fx, field_layout, extra); let (_, unsized_align) =
crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1); let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
let align_sub_1 = fx.bcx.ins().isub(unsized_align, one); let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
@@ -42,10 +43,7 @@ fn codegen_field<'tcx>(
let and_rhs = fx.bcx.ins().isub(zero, unsized_align); let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
let offset = fx.bcx.ins().band(and_lhs, and_rhs); let offset = fx.bcx.ins().band(and_lhs, and_rhs);
( (base.offset_value(fx, offset), field_layout)
base.offset_value(fx, offset),
field_layout,
)
} }
} }
} else { } else {
@@ -53,7 +51,11 @@ fn codegen_field<'tcx>(
} }
} }
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar: &Scalar) -> Offset32 { fn scalar_pair_calculate_b_offset(
tcx: TyCtxt<'_>,
a_scalar: &Scalar,
b_scalar: &Scalar,
) -> Offset32 {
let b_offset = a_scalar let b_offset = a_scalar
.value .value
.size(&tcx) .size(&tcx)
@@ -77,7 +79,11 @@ impl<'tcx> CValue<'tcx> {
CValue(CValueInner::ByRef(ptr, None), layout) CValue(CValueInner::ByRef(ptr, None), layout)
} }
pub(crate) fn by_ref_unsized(ptr: Pointer, meta: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> { pub(crate) fn by_ref_unsized(
ptr: Pointer,
meta: Value,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
CValue(CValueInner::ByRef(ptr, Some(meta)), layout) CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
} }
@@ -85,7 +91,11 @@ impl<'tcx> CValue<'tcx> {
CValue(CValueInner::ByVal(value), layout) CValue(CValueInner::ByVal(value), layout)
} }
pub(crate) fn by_val_pair(value: Value, extra: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> { pub(crate) fn by_val_pair(
value: Value,
extra: Value,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
CValue(CValueInner::ByValPair(value, extra), layout) CValue(CValueInner::ByValPair(value, extra), layout)
} }
@@ -94,7 +104,10 @@ impl<'tcx> CValue<'tcx> {
} }
// FIXME remove // FIXME remove
pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> (Pointer, Option<Value>) { pub(crate) fn force_stack(
self,
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
) -> (Pointer, Option<Value>) {
let layout = self.1; let layout = self.1;
match self.0 { match self.0 {
CValueInner::ByRef(ptr, meta) => (ptr, meta), CValueInner::ByRef(ptr, meta) => (ptr, meta),
@@ -122,7 +135,8 @@ impl<'tcx> CValue<'tcx> {
Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()), Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
Abi::Vector { ref element, count } => { Abi::Vector { ref element, count } => {
scalar_to_clif_type(fx.tcx, element.clone()) scalar_to_clif_type(fx.tcx, element.clone())
.by(u16::try_from(count).unwrap()).unwrap() .by(u16::try_from(count).unwrap())
.unwrap()
} }
_ => unreachable!("{:?}", layout.ty), _ => unreachable!("{:?}", layout.ty),
}; };
@@ -153,7 +167,9 @@ impl<'tcx> CValue<'tcx> {
let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new()); let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
(val1, val2) (val1, val2)
} }
CValueInner::ByRef(_, Some(_)) => bug!("load_scalar_pair for unsized value not allowed"), CValueInner::ByRef(_, Some(_)) => {
bug!("load_scalar_pair for unsized value not allowed")
}
CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"), CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
CValueInner::ByValPair(val1, val2) => (val1, val2), CValueInner::ByValPair(val1, val2) => (val1, val2),
} }
@@ -166,33 +182,29 @@ impl<'tcx> CValue<'tcx> {
) -> CValue<'tcx> { ) -> CValue<'tcx> {
let layout = self.1; let layout = self.1;
match self.0 { match self.0 {
CValueInner::ByVal(val) => { CValueInner::ByVal(val) => match layout.abi {
match layout.abi { Abi::Vector { element: _, count } => {
Abi::Vector { element: _, count } => { let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???"); let field = u8::try_from(field.index()).unwrap();
let field = u8::try_from(field.index()).unwrap(); assert!(field < count);
assert!(field < count); let lane = fx.bcx.ins().extractlane(val, field);
let lane = fx.bcx.ins().extractlane(val, field); let field_layout = layout.field(&*fx, usize::from(field));
let field_layout = layout.field(&*fx, usize::from(field)); CValue::by_val(lane, field_layout)
CValue::by_val(lane, field_layout)
}
_ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
} }
} _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
CValueInner::ByValPair(val1, val2) => { },
match layout.abi { CValueInner::ByValPair(val1, val2) => match layout.abi {
Abi::ScalarPair(_, _) => { Abi::ScalarPair(_, _) => {
let val = match field.as_u32() { let val = match field.as_u32() {
0 => val1, 0 => val1,
1 => val2, 1 => val2,
_ => bug!("field should be 0 or 1"), _ => bug!("field should be 0 or 1"),
}; };
let field_layout = layout.field(&*fx, usize::from(field)); let field_layout = layout.field(&*fx, usize::from(field));
CValue::by_val(val, field_layout) CValue::by_val(val, field_layout)
}
_ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
} }
} _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
},
CValueInner::ByRef(ptr, None) => { CValueInner::ByRef(ptr, None) => {
let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field); let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
CValue::by_ref(field_ptr, field_layout) CValue::by_ref(field_ptr, field_layout)
@@ -201,7 +213,11 @@ impl<'tcx> CValue<'tcx> {
} }
} }
pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) { pub(crate) fn unsize_value(
self,
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
dest: CPlace<'tcx>,
) {
crate::unsize::coerce_unsized_into(fx, self, dest); crate::unsize::coerce_unsized_into(fx, self, dest);
} }
@@ -217,7 +233,11 @@ impl<'tcx> CValue<'tcx> {
match layout.ty.kind { match layout.ty.kind {
ty::Bool => { ty::Bool => {
assert!(const_val == 0 || const_val == 1, "Invalid bool 0x{:032X}", const_val); assert!(
const_val == 0 || const_val == 1,
"Invalid bool 0x{:032X}",
const_val
);
} }
_ => {} _ => {}
} }
@@ -254,8 +274,14 @@ impl<'tcx> CValue<'tcx> {
} }
pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self { pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
assert!(matches!(self.layout().ty.kind, ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..))); assert!(matches!(
assert!(matches!(layout.ty.kind, ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..))); self.layout().ty.kind,
ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
));
assert!(matches!(
layout.ty.kind,
ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
));
assert_eq!(self.layout().abi, layout.abi); assert_eq!(self.layout().abi, layout.abi);
CValue(self.0, layout) CValue(self.0, layout)
} }
@@ -319,8 +345,7 @@ impl<'tcx> CPlace<'tcx> {
) -> CPlace<'tcx> { ) -> CPlace<'tcx> {
let var = Variable::with_u32(fx.next_ssa_var); let var = Variable::with_u32(fx.next_ssa_var);
fx.next_ssa_var += 1; fx.next_ssa_var += 1;
fx.bcx fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
.declare_var(var, fx.clif_type(layout.ty).unwrap());
CPlace { CPlace {
inner: CPlaceInner::Var(local, var), inner: CPlaceInner::Var(local, var),
layout, layout,
@@ -353,7 +378,11 @@ impl<'tcx> CPlace<'tcx> {
} }
} }
pub(crate) fn for_ptr_with_extra(ptr: Pointer, extra: Value, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> { pub(crate) fn for_ptr_with_extra(
ptr: Pointer,
extra: Value,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
CPlace { CPlace {
inner: CPlaceInner::Addr(ptr, Some(extra)), inner: CPlaceInner::Addr(ptr, Some(extra)),
layout, layout,
@@ -365,19 +394,23 @@ impl<'tcx> CPlace<'tcx> {
match self.inner { match self.inner {
CPlaceInner::Var(_local, var) => { CPlaceInner::Var(_local, var) => {
let val = fx.bcx.use_var(var); let val = fx.bcx.use_var(var);
fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index())); fx.bcx
.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
CValue::by_val(val, layout) CValue::by_val(val, layout)
} }
CPlaceInner::VarPair(_local, var1, var2) => { CPlaceInner::VarPair(_local, var1, var2) => {
let val1 = fx.bcx.use_var(var1); let val1 = fx.bcx.use_var(var1);
fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index())); fx.bcx
.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
let val2 = fx.bcx.use_var(var2); let val2 = fx.bcx.use_var(var2);
fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index())); fx.bcx
.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
CValue::by_val_pair(val1, val2, layout) CValue::by_val_pair(val1, val2, layout)
} }
CPlaceInner::VarLane(_local, var, lane) => { CPlaceInner::VarLane(_local, var, lane) => {
let val = fx.bcx.use_var(var); let val = fx.bcx.use_var(var);
fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index())); fx.bcx
.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
let val = fx.bcx.ins().extractlane(val, lane); let val = fx.bcx.ins().extractlane(val, lane);
CValue::by_val(val, layout) CValue::by_val(val, layout)
} }
@@ -407,7 +440,11 @@ impl<'tcx> CPlace<'tcx> {
} }
} }
pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, from: CValue<'tcx>) { pub(crate) fn write_cvalue(
self,
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
from: CValue<'tcx>,
) {
fn assert_assignable<'tcx>( fn assert_assignable<'tcx>(
fx: &FunctionCx<'_, 'tcx, impl Backend>, fx: &FunctionCx<'_, 'tcx, impl Backend>,
from_ty: Ty<'tcx>, from_ty: Ty<'tcx>,
@@ -415,7 +452,10 @@ impl<'tcx> CPlace<'tcx> {
) { ) {
match (&from_ty.kind, &to_ty.kind) { match (&from_ty.kind, &to_ty.kind) {
(ty::Ref(_, a, _), ty::Ref(_, b, _)) (ty::Ref(_, a, _), ty::Ref(_, b, _))
| (ty::RawPtr(TypeAndMut { ty: a, mutbl: _}), ty::RawPtr(TypeAndMut { ty: b, mutbl: _})) => { | (
ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
) => {
assert_assignable(fx, a, b); assert_assignable(fx, a, b);
} }
(ty::FnPtr(_), ty::FnPtr(_)) => { (ty::FnPtr(_), ty::FnPtr(_)) => {
@@ -478,8 +518,7 @@ impl<'tcx> CPlace<'tcx> {
self, self,
fx: &mut FunctionCx<'_, 'tcx, impl Backend>, fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
from: CValue<'tcx>, from: CValue<'tcx>,
#[cfg_attr(not(debug_assertions), allow(unused_variables))] #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
method: &'static str,
) { ) {
fn transmute_value<'tcx>( fn transmute_value<'tcx>(
fx: &mut FunctionCx<'_, 'tcx, impl Backend>, fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
@@ -492,16 +531,17 @@ impl<'tcx> CPlace<'tcx> {
(_, _) if src_ty == dst_ty => data, (_, _) if src_ty == dst_ty => data,
// This is a `write_cvalue_transmute`. // This is a `write_cvalue_transmute`.
(types::I32, types::F32) | (types::F32, types::I32) (types::I32, types::F32)
| (types::I64, types::F64) | (types::F64, types::I64) => { | (types::F32, types::I32)
fx.bcx.ins().bitcast(dst_ty, data) | (types::I64, types::F64)
} | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
_ if src_ty.is_vector() && dst_ty.is_vector() => { _ if src_ty.is_vector() && dst_ty.is_vector() => {
fx.bcx.ins().raw_bitcast(dst_ty, data) fx.bcx.ins().raw_bitcast(dst_ty, data)
} }
_ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty), _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
}; };
fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index())); fx.bcx
.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, data); fx.bcx.def_var(var, data);
} }
@@ -516,7 +556,14 @@ impl<'tcx> CPlace<'tcx> {
}; };
fx.add_comment( fx.add_comment(
fx.bcx.func.layout.last_inst(cur_block).unwrap(), fx.bcx.func.layout.last_inst(cur_block).unwrap(),
format!("{}: {:?}: {:?} <- {:?}: {:?}", method, self.inner(), self.layout().ty, from.0, from.layout().ty), format!(
"{}: {:?}: {:?} <- {:?}: {:?}",
method,
self.inner(),
self.layout().ty,
from.0,
from.layout().ty
),
); );
} }
@@ -540,13 +587,15 @@ impl<'tcx> CPlace<'tcx> {
// First get the old vector // First get the old vector
let vector = fx.bcx.use_var(var); let vector = fx.bcx.use_var(var);
fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index())); fx.bcx
.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
// Next insert the written lane into the vector // Next insert the written lane into the vector
let vector = fx.bcx.ins().insertlane(vector, data, lane); let vector = fx.bcx.ins().insertlane(vector, data, lane);
// Finally write the new vector // Finally write the new vector
fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index())); fx.bcx
.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
fx.bcx.def_var(var, vector); fx.bcx.def_var(var, vector);
return; return;
@@ -571,7 +620,9 @@ impl<'tcx> CPlace<'tcx> {
let (value, extra) = from.load_scalar_pair(fx); let (value, extra) = from.load_scalar_pair(fx);
let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, MemFlags::new()); to_ptr.store(fx, value, MemFlags::new());
to_ptr.offset(fx, b_offset).store(fx, extra, MemFlags::new()); to_ptr
.offset(fx, b_offset)
.store(fx, extra, MemFlags::new());
return; return;
} }
_ => {} _ => {}
@@ -628,14 +679,18 @@ impl<'tcx> CPlace<'tcx> {
let layout = layout.field(&*fx, field.index()); let layout = layout.field(&*fx, field.index());
match field.as_u32() { match field.as_u32() {
0 => return CPlace { 0 => {
inner: CPlaceInner::Var(local, var1), return CPlace {
layout, inner: CPlaceInner::Var(local, var1),
}, layout,
1 => return CPlace { }
inner: CPlaceInner::Var(local, var2), }
layout, 1 => {
}, return CPlace {
inner: CPlaceInner::Var(local, var2),
layout,
}
}
_ => unreachable!("field should be 0 or 1"), _ => unreachable!("field should be 0 or 1"),
} }
} }
@@ -677,7 +732,10 @@ impl<'tcx> CPlace<'tcx> {
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx); let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout) CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else { } else {
CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout) CPlace::for_ptr(
Pointer::new(self.to_cvalue(fx).load_scalar(fx)),
inner_layout,
)
} }
} }


@@ -51,10 +51,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>(
arg.load_scalar_pair(fx) arg.load_scalar_pair(fx)
} else { } else {
let (ptr, vtable) = arg.try_to_ptr().unwrap(); let (ptr, vtable) = arg.try_to_ptr().unwrap();
( (ptr.get_addr(fx), vtable.unwrap())
ptr.get_addr(fx),
vtable.unwrap()
)
}; };
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes(); let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
@@ -92,8 +89,11 @@ fn build_vtable<'tcx>(
let tcx = fx.tcx; let tcx = fx.tcx;
let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize; let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
let drop_in_place_fn = let drop_in_place_fn = import_function(
import_function(tcx, &mut fx.cx.module, Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx)); tcx,
&mut fx.cx.module,
Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.tcx),
);
let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None]; let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
@@ -109,7 +109,9 @@ fn build_vtable<'tcx>(
Some(import_function( Some(import_function(
tcx, tcx,
&mut fx.cx.module, &mut fx.cx.module,
Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs).unwrap().polymorphize(fx.tcx), Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs)
.unwrap()
.polymorphize(fx.tcx),
)) ))
}) })
}); });
@@ -133,7 +135,8 @@ fn build_vtable<'tcx>(
} }
let data_id = fx let data_id = fx
.cx.module .cx
.module
.declare_data( .declare_data(
&format!( &format!(
"__vtable.{}.for.{:?}.{}", "__vtable.{}.for.{:?}.{}",