Merge commit 'e228f0c16ea8c34794a6285bf57aab627c26b147' into libgccjit-codegen

Antoni Boucher 2021-08-15 08:29:07 -04:00
commit 3d5d4e324d
27 changed files with 265 additions and 3053 deletions


@@ -21,7 +21,7 @@ echo "[GIT] add"
 git add .
 echo "[GIT] commit"
-# This is needed on virgin system where nothing is configured.
+# This is needed on systems where nothing is configured.
 # git really needs something here, or it will fail.
 # Even using --author is not enough.
 git config user.email || git config user.email "none@example.com"


@@ -31,9 +31,8 @@ if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
 fi
 export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot'
-#export RUSTFLAGS=$linker' -Cpanic=abort -Cdebuginfo=2 -Zpanic-abort-tests -Zcodegen-backend='$(pwd)'/target/'$CHANNEL'/librustc_codegen_gcc.'$dylib_ext' --sysroot '$(pwd)'/build_sysroot/sysroot -Clto=fat -Cembed-bitcode=yes'
-# FIXME remove once the atomic shim is gone
+# FIXME(antoyo): remove once the atomic shim is gone
 if [[ `uname` == 'Darwin' ]]; then
 export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
 fi
@@ -43,6 +42,3 @@ export RUSTC_LOG=warn # display metadata load errors
 export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
 export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
-export CG_CLIF_DISPLAY_CG_TIME=1
-export CG_CLIF_INCR_CACHE_DISABLED=1


@@ -253,7 +253,7 @@ fn main() {
 }
 }
-// TODO: not sure about this assert. ABC is not defined, so should it be really 0?
+// TODO(antoyo): to make this work, support weak linkage.
 //unsafe { assert_eq!(ABC as usize, 0); }
 &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;


@@ -16,7 +16,6 @@ fn main() {
 let stderr = ::std::io::stderr();
 let mut stderr = stderr.lock();
-// FIXME: this thread panics.
 std::thread::spawn(move || {
 println!("Hello from another thread!");
 });
@@ -56,7 +55,7 @@ fn main() {
 assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
 assert_eq!(32767i16, 32767i16.saturating_add(1));
-/*assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
 assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
 let _d = 0i128.checked_div(2i128);
@@ -85,7 +84,7 @@ fn main() {
 assert_eq!(houndred_i128 as f32, 100.0);
 assert_eq!(houndred_i128 as f64, 100.0);
 assert_eq!(houndred_f32 as i128, 100);
-assert_eq!(houndred_f64 as i128, 100);*/
+assert_eq!(houndred_f64 as i128, 100);
 let _a = 1u32 << 2u8;


@@ -0,0 +1 @@
+/home/bouanto/Ordinateur/Programmation/Projets/gcc-build/build/gcc


@@ -11,8 +11,7 @@ use crate::type_of::LayoutGccExt;
 impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
-// TODO
-//fn_abi.apply_attrs_callsite(self, callsite)
+// TODO(antoyo)
 }
 fn get_param(&self, index: usize) -> Self::Value {
@@ -87,12 +86,9 @@ impl GccType for Reg {
 }
 pub trait FnAbiGccExt<'gcc, 'tcx> {
-// TODO: return a function pointer type instead?
+// TODO(antoyo): return a function pointer type instead?
 fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool);
 fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
-/*fn llvm_cconv(&self) -> llvm::CallConv;
-fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
-fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);*/
 }
 impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
@@ -145,12 +141,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
 continue;
 }
 PassMode::Indirect { extra_attrs: Some(_), .. } => {
-/*let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
-let ptr_layout = cx.layout_of(ptr_ty);
-argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 0, true));
-argument_tys.push(ptr_layout.scalar_pair_element_gcc_type(cx, 1, true));*/
 unimplemented!();
-//continue;
 }
 PassMode::Cast(cast) => cast.gcc_type(cx),
 PassMode::Indirect { extra_attrs: None, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
@@ -166,121 +157,4 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
 let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
 pointer_type
 }
-/*fn llvm_cconv(&self) -> llvm::CallConv {
-match self.conv {
-Conv::C | Conv::Rust => llvm::CCallConv,
-Conv::AmdGpuKernel => llvm::AmdGpuKernel,
-Conv::ArmAapcs => llvm::ArmAapcsCallConv,
-Conv::Msp430Intr => llvm::Msp430Intr,
-Conv::PtxKernel => llvm::PtxKernel,
-Conv::X86Fastcall => llvm::X86FastcallCallConv,
-Conv::X86Intr => llvm::X86_Intr,
-Conv::X86Stdcall => llvm::X86StdcallCallConv,
-Conv::X86ThisCall => llvm::X86_ThisCall,
-Conv::X86VectorCall => llvm::X86_VectorCall,
-Conv::X86_64SysV => llvm::X86_64_SysV,
-Conv::X86_64Win64 => llvm::X86_64_Win64,
-}
-}
-fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
-// FIXME(eddyb) can this also be applied to callsites?
-if self.ret.layout.abi.is_uninhabited() {
-llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
-}
-// FIXME(eddyb, wesleywiser): apply this to callsites as well?
-if !self.can_unwind {
-llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
-}
-let mut i = 0;
-let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
-attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
-i += 1;
-};
-match self.ret.mode {
-PassMode::Direct(ref attrs) => {
-attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
-}
-PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(cx))),
-_ => {}
-}
-for arg in &self.args {
-if arg.pad.is_some() {
-apply(&ArgAttributes::new(), None);
-}
-match arg.mode {
-PassMode::Ignore => {}
-PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
-apply(attrs, Some(arg.layout.gcc_type(cx)))
-}
-PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
-apply(attrs, None);
-apply(extra_attrs, None);
-}
-PassMode::Pair(ref a, ref b) => {
-apply(a, None);
-apply(b, None);
-}
-PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
-}
-}
-}
-fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
-// FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
-let mut i = 0;
-let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
-attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
-i += 1;
-};
-match self.ret.mode {
-PassMode::Direct(ref attrs) => {
-attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
-}
-PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.gcc_type(bx))),
-_ => {}
-}
-if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
-// If the value is a boolean, the range is 0..2 and that ultimately
-// become 0..0 when the type becomes i1, which would be rejected
-// by the LLVM verifier.
-if let Int(..) = scalar.value {
-if !scalar.is_bool() {
-let range = scalar.valid_range_exclusive(bx);
-if range.start != range.end {
-bx.range_metadata(callsite, range);
-}
-}
-}
-}
-for arg in &self.args {
-if arg.pad.is_some() {
-apply(&ArgAttributes::new(), None);
-}
-match arg.mode {
-PassMode::Ignore => {}
-PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
-apply(attrs, Some(arg.layout.gcc_type(bx)))
-}
-PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
-apply(attrs, None);
-apply(extra_attrs, None);
-}
-PassMode::Pair(ref a, ref b) => {
-apply(a, None);
-apply(b, None);
-}
-PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
-}
-}
-let cconv = self.llvm_cconv();
-if cconv != llvm::CCallConv {
-llvm::SetInstructionCallConv(callsite, cconv);
-}
-}*/
 }


@@ -1,4 +1,3 @@
-//use crate::attributes;
 use gccjit::{FunctionType, ToRValue};
 use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
 use rustc_middle::bug;
@@ -50,11 +49,10 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: Alloc
 let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
 if tcx.sess.target.options.default_hidden_visibility {
-//llvm::LLVMRustSetVisibility(func, llvm::Visibility::Hidden);
+// TODO(antoyo): set visibility.
 }
 if tcx.sess.must_emit_unwind_tables() {
-// TODO
-//attributes::emit_uwtable(func, true);
+// TODO(antoyo): emit unwind tables.
 }
 let callee = kind.fn_name(method.name);
@@ -62,7 +60,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, kind: Alloc
 .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
 .collect();
 let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
-//llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+// TODO(antoyo): set visibility.
 let block = func.new_block("entry");


@@ -15,106 +15,8 @@ use crate::type_of::LayoutGccExt;
 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 fn codegen_llvm_inline_asm(&mut self, _ia: &LlvmInlineAsmInner, _outputs: Vec<PlaceRef<'tcx, RValue<'gcc>>>, mut _inputs: Vec<RValue<'gcc>>, _span: Span) -> bool {
-// TODO
+// TODO(antoyo)
 return true;
-/*let mut ext_constraints = vec![];
-let mut output_types = vec![];
-// Prepare the output operands
-let mut indirect_outputs = vec![];
-for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
-if out.is_rw {
-let operand = self.load_operand(place);
-if let OperandValue::Immediate(_) = operand.val {
-inputs.push(operand.immediate());
-}
-ext_constraints.push(i.to_string());
-}
-if out.is_indirect {
-let operand = self.load_operand(place);
-if let OperandValue::Immediate(_) = operand.val {
-indirect_outputs.push(operand.immediate());
-}
-} else {
-output_types.push(place.layout.gcc_type(self.cx()));
-}
-}
-if !indirect_outputs.is_empty() {
-indirect_outputs.extend_from_slice(&inputs);
-inputs = indirect_outputs;
-}
-let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
-// Default per-arch clobbers
-// Basically what clang does
-let arch_clobbers = match &self.sess().target.target.arch[..] {
-"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
-"mips" | "mips64" => vec!["~{$1}"],
-_ => Vec::new(),
-};
-let all_constraints = ia
-.outputs
-.iter()
-.map(|out| out.constraint.to_string())
-.chain(ia.inputs.iter().map(|s| s.to_string()))
-.chain(ext_constraints)
-.chain(clobbers)
-.chain(arch_clobbers.iter().map(|s| (*s).to_string()))
-.collect::<Vec<String>>()
-.join(",");
-debug!("Asm Constraints: {}", &all_constraints);
-// Depending on how many outputs we have, the return type is different
-let num_outputs = output_types.len();
-let output_type = match num_outputs {
-0 => self.type_void(),
-1 => output_types[0],
-_ => self.type_struct(&output_types, false),
-};
-let asm = ia.asm.as_str();
-let r = inline_asm_call(
-self,
-&asm,
-&all_constraints,
-&inputs,
-output_type,
-ia.volatile,
-ia.alignstack,
-ia.dialect,
-);
-if r.is_none() {
-return false;
-}
-let r = r.unwrap();
-// Again, based on how many outputs we have
-let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
-for (i, (_, &place)) in outputs.enumerate() {
-let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
-OperandValue::Immediate(v).store(self, place);
-}
-// Store mark in a metadata node so we can map LLVM errors
-// back to source locations. See #17552.
-unsafe {
-let key = "srcloc";
-let kind = llvm::LLVMGetMDKindIDInContext(
-self.llcx,
-key.as_ptr() as *const c_char,
-key.len() as c_uint,
-);
-let val: &'ll Value = self.const_i32(span.ctxt().outer_expn().as_u32() as i32);
-llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
-}
-true*/
 }
 fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, _span: &[Span]) {
@@ -127,7 +29,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 };
 // Collect the types of output operands
-// FIXME: we do this here instead of later because of a bug in libgccjit where creating the
+// FIXME(antoyo): we do this here instead of later because of a bug in libgccjit where creating the
 // variable after the extended asm expression causes a segfault:
 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100380
 let mut output_vars = FxHashMap::default();
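The FIXME above pins down an ordering constraint: every output variable has to exist before the extended asm expression is created. A minimal hedged sketch of that ordering against the gccjit crate, using only calls that appear elsewhere in this commit (the function name, the "asm_output" local, and the "nop" template are illustrative, not part of the patch):

    use gccjit::{Context, FunctionType};

    fn ordering_demo() {
        let context = Context::default();
        let void_type = context.new_type::<()>();
        let int_type = context.new_type::<i32>();
        let func = context.new_function(None, FunctionType::Exported, void_type, &[], "ordering_demo", false);
        let block = func.new_block("entry");
        // Create the local that will receive the asm output BEFORE add_extended_asm();
        // creating it after triggers the libgccjit segfault tracked in PR 100380.
        let _asm_output = func.new_local(None, int_type, "asm_output");
        block.add_extended_asm(None, "nop");
        block.end_with_void_return(None);
    }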
@@ -160,11 +62,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 match out_place {
 Some(place) => place.layout.gcc_type(self.cx, false),
 None => {
-// If the output is discarded, we don't really care what
-// type is used. We're just using this to tell GCC to
-// reserve the register.
-//dummy_output_type(self.cx, reg.reg_class())
 // NOTE: if no output value, we should not create one.
 continue;
 },
@@ -251,9 +148,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 template_str
 }
 else {
-// FIXME: this might break the "m" memory constraint:
+// FIXME(antoyo): this might break the "m" memory constraint:
 // https://stackoverflow.com/a/9347957/389119
-// TODO: only set on x86 platforms.
+// TODO(antoyo): only set on x86 platforms.
 format!(".att_syntax noprefix\n\t{}\n\t.intel_syntax noprefix", template_str)
 };
 let extended_asm = block.add_extended_asm(None, &template_str);
@@ -274,7 +171,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 },
 };
 output_types.push(ty);
-//op_idx.insert(idx, constraints.len());
 let prefix = if late { "=" } else { "=&" };
 let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
@@ -295,14 +191,13 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 None => dummy_output_type(self.cx, reg.reg_class())
 };
 output_types.push(ty);
-//op_idx.insert(idx, constraints.len());
-// TODO: prefix of "+" for reading and writing?
+// TODO(antoyo): prefix of "+" for reading and writing?
 let prefix = if late { "=" } else { "=&" };
 let constraint = format!("{}{}", prefix, reg_to_gcc(reg));
 if out_place.is_some() {
 let var = output_vars[&idx];
-// TODO: also specify an output operand when out_place is none: that would
+// TODO(antoyo): also specify an output operand when out_place is none: that would
 // be the clobber but clobbers do not support general constraint like reg;
 // they only support named registers.
 // Not sure how we can do this. And the LLVM backend does not seem to add a
@@ -321,63 +216,6 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 }
-/*if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
-match asm_arch {
-InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
-constraints.push("~{cc}".to_string());
-}
-InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
-constraints.extend_from_slice(&[
-"~{dirflag}".to_string(),
-"~{fpsr}".to_string(),
-"~{flags}".to_string(),
-]);
-}
-InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
-}
-}
-if !options.contains(InlineAsmOptions::NOMEM) {
-// This is actually ignored by LLVM, but it's probably best to keep
-// it just in case. LLVM instead uses the ReadOnly/ReadNone
-// attributes on the call instruction to optimize.
-constraints.push("~{memory}".to_string());
-}
-let volatile = !options.contains(InlineAsmOptions::PURE);
-let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
-let output_type = match &output_types[..] {
-[] => self.type_void(),
-[ty] => ty,
-tys => self.type_struct(&tys, false),
-};*/
-/*let result = inline_asm_call(
-self,
-&template_str,
-&constraints.join(","),
-&inputs,
-output_type,
-volatile,
-alignstack,
-dialect,
-span,
-)
-.unwrap_or_else(|| span_bug!(span, "LLVM asm constraint validation failed"));
-if options.contains(InlineAsmOptions::PURE) {
-if options.contains(InlineAsmOptions::NOMEM) {
-llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
-} else if options.contains(InlineAsmOptions::READONLY) {
-llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
-}
-} else {
-if options.contains(InlineAsmOptions::NOMEM) {
-llvm::Attribute::InaccessibleMemOnly
-.apply_callsite(llvm::AttributePlace::Function, result);
-} else {
-// LLVM doesn't have an attribute to represent ReadOnly + SideEffect
-}
-}*/
 // Write results to outputs
 for (idx, op) in operands.iter().enumerate() {
 if let InlineAsmOperandRef::Out { place: Some(place), .. }
@@ -390,12 +228,12 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 /// Converts a register class to a GCC constraint code.
-// TODO: return &'static str instead?
+// TODO(antoyo): return &'static str instead?
 fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
 match reg {
 // For vector registers LLVM wants the register name to match the type size.
 InlineAsmRegOrRegClass::Reg(reg) => {
-// TODO: add support for vector register.
+// TODO(antoyo): add support for vector register.
 let constraint =
 match reg.name() {
 "ax" => "a",
@@ -404,11 +242,11 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> String {
 "dx" => "d",
 "si" => "S",
 "di" => "D",
-// TODO: for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
-// TODO: in this case though, it's a clobber, so it should work as r11.
+// TODO(antoyo): for registers like r11, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+// TODO(antoyo): in this case though, it's a clobber, so it should work as r11.
 // Recent nightly supports clobber() syntax, so update to it. It does not seem
 // like it's implemented yet.
-name => name, // FIXME: probably wrong.
+name => name, // FIXME(antoyo): probably wrong.
 };
 constraint.to_string()
 },
@@ -570,7 +408,6 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
 InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
 | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
 unimplemented!()
-//if modifier == Some('v') { None } else { modifier }
 }
 InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
 | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => unimplemented!(),
@@ -583,11 +420,6 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
 | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
 | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
 unimplemented!()
-/*if modifier.is_none() {
-Some('q')
-} else {
-modifier
-}*/
 }
 InlineAsmRegClass::Bpf(_) => unimplemented!(),
 InlineAsmRegClass::Hexagon(_) => unimplemented!(),
@@ -612,15 +444,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
 InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => unimplemented!(),
 InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
 | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
-| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!() /*match (reg, modifier) {
-(X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
-(X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
-(X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
-(_, Some('x')) => Some('x'),
-(_, Some('y')) => Some('t'),
-(_, Some('z')) => Some('g'),
-_ => unreachable!(),
-}*/,
+| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => unimplemented!(),
 InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
 InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => unimplemented!(),
 InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),


@@ -15,132 +15,18 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 {
 let context = &module.module_llvm.context;
-//let llcx = &*module.module_llvm.llcx;
-//let tm = &*module.module_llvm.tm;
 let module_name = module.name.clone();
 let module_name = Some(&module_name[..]);
-//let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
-/*if cgcx.msvc_imps_needed {
-create_msvc_imps(cgcx, llcx, llmod);
-}*/
-// A codegen-specific pass manager is used to generate object
-// files for an GCC module.
-//
-// Apparently each of these pass managers is a one-shot kind of
-// thing, so we create a new one for each type of output. The
-// pass manager passed to the closure should be ensured to not
-// escape the closure itself, and the manager should only be
-// used once.
-/*unsafe fn with_codegen<'ll, F, R>(tm: &'ll llvm::TargetMachine, llmod: &'ll llvm::Module, no_builtins: bool, f: F) -> R
-where F: FnOnce(&'ll mut PassManager<'ll>) -> R,
-{
-let cpm = llvm::LLVMCreatePassManager();
-llvm::LLVMAddAnalysisPasses(tm, cpm);
-llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
-f(cpm)
-}*/
-// Two things to note:
-// - If object files are just LLVM bitcode we write bitcode, copy it to
-// the .o file, and delete the bitcode if it wasn't otherwise
-// requested.
-// - If we don't have the integrated assembler then we need to emit
-// asm from LLVM and use `gcc` to create the object file.
 let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
 let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
 if config.bitcode_needed() {
-// TODO
-/*let _timer = cgcx
-.prof
-.generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
-let thin = ThinBuffer::new(llmod);
-let data = thin.data();
-if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
-let _timer = cgcx.prof.generic_activity_with_arg(
-"LLVM_module_codegen_emit_bitcode",
-&module.name[..],
-);
-if let Err(e) = fs::write(&bc_out, data) {
-let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
-diag_handler.err(&msg);
-}
-}
-if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
-let _timer = cgcx.prof.generic_activity_with_arg(
-"LLVM_module_codegen_embed_bitcode",
-&module.name[..],
-);
-embed_bitcode(cgcx, llcx, llmod, Some(data));
-}
-if config.emit_bc_compressed {
-let _timer = cgcx.prof.generic_activity_with_arg(
-"LLVM_module_codegen_emit_compressed_bitcode",
-&module.name[..],
-);
-let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
-let data = bytecode::encode(&module.name, data);
-if let Err(e) = fs::write(&dst, data) {
-let msg = format!("failed to write bytecode to {}: {}", dst.display(), e);
-diag_handler.err(&msg);
-}
-}*/
-} /*else if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Marker) {
-unimplemented!();
-//embed_bitcode(cgcx, llcx, llmod, None);
-}*/
+// TODO(antoyo)
+}
 if config.emit_ir {
 unimplemented!();
-/*let _timer = cgcx
-.prof
-.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
-let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
-let out_c = path_to_c_string(&out);
-extern "C" fn demangle_callback(
-input_ptr: *const c_char,
-input_len: size_t,
-output_ptr: *mut c_char,
-output_len: size_t,
-) -> size_t {
-let input =
-unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
-let input = match str::from_utf8(input) {
-Ok(s) => s,
-Err(_) => return 0,
-};
-let output = unsafe {
-slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
-};
-let mut cursor = io::Cursor::new(output);
-let demangled = match rustc_demangle::try_demangle(input) {
-Ok(d) => d,
-Err(_) => return 0,
-};
-if write!(cursor, "{:#}", demangled).is_err() {
-// Possible only if provided buffer is not big enough
-return 0;
-}
-cursor.position() as size_t
-}
-let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
-result.into_result().map_err(|()| {
-let msg = format!("failed to write LLVM IR to {}", out.display());
-llvm_err(diag_handler, &msg)
-})?;*/
 }
 if config.emit_asm {
@@ -149,10 +35,6 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
 let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
 context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
-/*with_codegen(tm, llmod, config.no_builtins, |cpm| {
-write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile)
-})?;*/
 }
 match config.emit_obj {
@@ -160,13 +42,11 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 let _timer = cgcx
 .prof
 .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
-//with_codegen(tm, llmod, config.no_builtins, |cpm| {
-//println!("1: {}", module.name);
 match &*module.name {
 "std_example.7rcbfp3g-cgu.15" => {
 println!("Dumping reproducer {}", module.name);
 let _ = fs::create_dir("/tmp/reproducers");
-// FIXME: segfault in dump_reproducer_to_file() might be caused by
+// FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
 // transmuting an rvalue to an lvalue.
 // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
 context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
@@ -174,33 +54,15 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 },
 _ => (),
 }
-/*let _ = fs::create_dir("/tmp/dumps");
-context.dump_to_file(&format!("/tmp/dumps/{}.c", module.name), true);
-println!("Dumped {}", module.name);*/
-//println!("Compile module {}", module.name);
 context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
-//})?;
 }
 EmitObj::Bitcode => {
-//unimplemented!();
-/*debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
-if let Err(e) = link_or_copy(&bc_out, &obj_out) {
-diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
-}
-if !config.emit_bc {
-debug!("removing_bitcode {:?}", bc_out);
-if let Err(e) = fs::remove_file(&bc_out) {
-diag_handler.err(&format!("failed to remove bitcode: {}", e));
-}
-}*/
+// TODO(antoyo)
 }
 EmitObj::None => {}
 }
-//drop(handlers);
 }
 Ok(module.into_compiled_module(
@@ -213,22 +75,4 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_han
 pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
 unimplemented!();
-/*use super::lto::{Linker, ModuleBuffer};
-// Sort the modules by name to ensure to ensure deterministic behavior.
-modules.sort_by(|a, b| a.name.cmp(&b.name));
-let (first, elements) =
-modules.split_first().expect("Bug! modules must contain at least one module.");
-let mut linker = Linker::new(first.module_llvm.llmod());
-for module in elements {
-let _timer =
-cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name));
-let buffer = ModuleBuffer::new(module.module_llvm.llmod());
-linker.add(&buffer.data()).map_err(|()| {
-let msg = format!("failed to serialize module {:?}", module.name);
-llvm_err(&diag_handler, &msg)
-})?;
-}
-drop(linker);
-Ok(modules.remove(0))*/
 }


@@ -35,7 +35,7 @@ pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
 Linkage::Appending => unimplemented!(),
 Linkage::Internal => GlobalKind::Internal,
 Linkage::Private => GlobalKind::Internal,
-Linkage::ExternalWeak => GlobalKind::Imported, // TODO: should be weak linkage.
+Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
 Linkage::Common => unimplemented!(),
 }
 }
@@ -46,7 +46,7 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
 Linkage::AvailableExternally => FunctionType::Extern,
 Linkage::LinkOnceAny => unimplemented!(),
 Linkage::LinkOnceODR => unimplemented!(),
-Linkage::WeakAny => FunctionType::Exported, // FIXME: should be similar to linkonce.
+Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
 Linkage::WeakODR => unimplemented!(),
 Linkage::Appending => unimplemented!(),
 Linkage::Internal => FunctionType::Internal,
@@ -74,19 +74,25 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
 // Instantiate monomorphizations without filling out definitions yet...
 //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
 let context = Context::default();
-// TODO: only set on x86 platforms.
+// TODO(antoyo): only set on x86 platforms.
 context.add_command_line_option("-masm=intel");
 for arg in &tcx.sess.opts.cg.llvm_args {
 context.add_command_line_option(arg);
 }
 context.add_command_line_option("-fno-semantic-interposition");
-//context.set_dump_code_on_compile(true);
+if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+context.set_dump_code_on_compile(true);
+}
 if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
 context.set_dump_initial_gimple(true);
 }
 context.set_debug_info(true);
-//context.set_dump_everything(true);
-//context.set_keep_intermediates(true);
+if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+context.set_dump_everything(true);
+}
+if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+context.set_keep_intermediates(true);
+}
 {
 let cx = CodegenCx::new(&context, cgu, tcx);
@@ -100,7 +106,6 @@ pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (Modul
 block.end_with_void_return(None);
 });
-//println!("module_codegen: {:?} {:?}", cgu_name, &cx.context as *const _);
 let mono_items = cgu.items_in_deterministic_order(tcx);
 for &(mono_item, (linkage, visibility)) in &mono_items {
 mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
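The four CG_GCCJIT_* checks added above all repeat the same `env::var(...).as_deref() == Ok("1")` idiom. A hedged sketch of a helper that would factor it out (`env_flag` is my name for illustration, not something this patch introduces):

    /// Returns true when the given environment variable is set to "1".
    fn env_flag(name: &str) -> bool {
        std::env::var(name).as_deref() == Ok("1")
    }

With such a helper, each dump option would read as, for example, `if env_flag("CG_GCCJIT_DUMP_GIMPLE") { context.set_dump_initial_gimple(true); }`.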


@@ -48,10 +48,10 @@ use crate::common::{SignType, TypeReflection, type_is_pointer};
 use crate::context::CodegenCx;
 use crate::type_of::LayoutGccExt;
-// TODO
+// TODO(antoyo)
 type Funclet = ();
-// TODO: remove this variable.
+// TODO(antoyo): remove this variable.
 static mut RETURN_VALUE_COUNT: usize = 0;
 enum ExtremumOperation {
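The TODO above earmarks the `static mut` counter for removal. One hedged replacement sketch (my suggestion, not part of this commit) keeps the `returnValueN` naming scheme used later in this file but swaps the unsafe access for an atomic:

    use std::sync::atomic::{AtomicUsize, Ordering};

    static RETURN_VALUE_COUNT: AtomicUsize = AtomicUsize::new(0);

    /// Returns a fresh "returnValueN" name for the next call-result local.
    fn next_return_value_name() -> String {
        let id = RETURN_VALUE_COUNT.fetch_add(1, Ordering::Relaxed);
        format!("returnValue{}", id)
    }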
@@ -99,7 +99,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 let load_ordering =
 match order {
-// TODO: does this make sense?
+// TODO(antoyo): does this make sense?
 AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
 _ => order.clone(),
 };
@@ -162,26 +162,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
-//let mut fn_ty = self.cx.val_ty(func);
-// Strip off pointers
-/*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
-fn_ty = self.cx.element_type(fn_ty);
-}*/
-/*assert!(
-self.cx.type_kind(fn_ty) == TypeKind::Function,
-"builder::{} not passed a function, but {:?}",
-typ,
-fn_ty
-);
-let param_tys = self.cx.func_params_types(fn_ty);
-let all_args_match = param_tys
-.iter()
-.zip(args.iter().map(|&v| self.val_ty(v)))
-.all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
 let mut all_args_match = true;
 let mut param_types = vec![];
 let param_count = func.get_param_count();
@@ -205,16 +185,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 .map(|(_i, (expected_ty, &actual_val))| {
 let actual_ty = actual_val.get_type();
 if expected_ty != actual_ty {
-/*debug!(
-"type mismatch in function call of {:?}. \
-Expected {:?} for param {}, got {:?}; injecting bitcast",
-func, expected_ty, i, actual_ty
-);*/
-/*println!(
-"type mismatch in function call of {:?}. \
-Expected {:?} for param {}, got {:?}; injecting bitcast",
-func, expected_ty, i, actual_ty
-);*/
 self.bitcast(actual_val, expected_ty)
 }
 else {
@@ -227,26 +197,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
-//let mut fn_ty = self.cx.val_ty(func);
-// Strip off pointers
-/*while self.cx.type_kind(fn_ty) == TypeKind::Pointer {
-fn_ty = self.cx.element_type(fn_ty);
-}*/
-/*assert!(
-self.cx.type_kind(fn_ty) == TypeKind::Function,
-"builder::{} not passed a function, but {:?}",
-typ,
-fn_ty
-);
-let param_tys = self.cx.func_params_types(fn_ty);
-let all_args_match = param_tys
-.iter()
-.zip(args.iter().map(|&v| self.val_ty(v)))
-.all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);*/
 let mut all_args_match = true;
 let mut param_types = vec![];
 let gcc_func = func_ptr.get_type().is_function_ptr_type().expect("function ptr");
@@ -269,16 +219,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 .map(|(_i, (expected_ty, &actual_val))| {
 let actual_ty = actual_val.get_type();
 if expected_ty != actual_ty {
-/*debug!(
-"type mismatch in function call of {:?}. \
-Expected {:?} for param {}, got {:?}; injecting bitcast",
-func, expected_ty, i, actual_ty
-);*/
-/*println!(
-"type mismatch in function call of {:?}. \
-Expected {:?} for param {}, got {:?}; injecting bitcast",
-func, expected_ty, i, actual_ty
-);*/
 self.bitcast(actual_val, expected_ty)
 }
 else {
@@ -291,27 +231,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
-let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO: make sure make_pointer() is okay here.
+let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
 let stored_ty = self.cx.val_ty(val);
 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-//assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
 if dest_ptr_ty == stored_ptr_ty {
 ptr
 }
 else {
-/*debug!(
-"type mismatch in store. \
-Expected {:?}, got {:?}; inserting bitcast",
-dest_ptr_ty, stored_ptr_ty
-);*/
-/*println!(
-"type mismatch in store. \
-Expected {:?}, got {:?}; inserting bitcast",
-dest_ptr_ty, stored_ptr_ty
-);*/
-//ptr
 self.bitcast(ptr, stored_ptr_ty)
 }
 }
@@ -321,13 +248,9 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
-//debug!("call {:?} with args ({:?})", func, args);
-// TODO: remove when the API supports a different type for functions.
+// TODO(antoyo): remove when the API supports a different type for functions.
 let func: Function<'gcc> = self.cx.rvalue_as_function(func);
 let args = self.check_call("call", func, args);
-//let bundle = funclet.map(|funclet| funclet.bundle());
-//let bundle = bundle.as_ref().map(|b| &*b.raw);
 // gccjit requires to use the result of functions, even when it's not used.
 // That's why we assign the result to a local or call add_eval().
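The two comment lines above describe the pattern that function_call(), function_ptr_call(), and overflow_call() all follow. A standalone hedged sketch against the gccjit crate, using only calls that appear elsewhere in this commit (the names `call_demo` and `some_extern` are illustrative): either the result is discarded explicitly with add_eval(), or it is kept alive in a named local.

    use gccjit::{Context, FunctionType};

    fn call_demo() {
        let context = Context::default();
        let void_type = context.new_type::<()>();
        let int_type = context.new_type::<i32>();
        let callee = context.new_function(None, FunctionType::Extern, int_type, &[], "some_extern", false);
        let func = context.new_function(None, FunctionType::Exported, void_type, &[], "call_demo", false);
        let block = func.new_block("entry");
        // Discard the result explicitly: gccjit rejects a silently ignored call result.
        block.add_eval(None, context.new_call(None, callee, &[]));
        // Or keep it in a local, as function_call() does with its returnValueN locals.
        let result = func.new_local(None, int_type, "returnValue");
        block.add_assignment(None, result, context.new_call(None, callee, &[]));
        block.end_with_void_return(None);
    }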
@@ -349,11 +272,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
-//debug!("func ptr call {:?} with args ({:?})", func, args);
 let args = self.check_ptr_call("call", func_ptr, args);
-//let bundle = funclet.map(|funclet| funclet.bundle());
-//let bundle = bundle.as_ref().map(|b| &*b.raw);
 // gccjit requires to use the result of functions, even when it's not used.
 // That's why we assign the result to a local or call add_eval().
@@ -363,7 +282,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 let void_type = self.context.new_type::<()>();
 let current_func = current_block.get_function();
-// FIXME: As a temporary workaround for unsupported LLVM intrinsics.
+// FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
 if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
 return_type = self.int_type;
 }
@@ -376,7 +295,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 else {
 if gcc_func.get_param_count() == 0 {
-// FIXME: As a temporary workaround for unsupported LLVM intrinsics.
+// FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
 current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
 }
 else {
@@ -390,17 +309,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 }
 pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
-//debug!("overflow_call {:?} with args ({:?})", func, args);
-//let bundle = funclet.map(|funclet| funclet.bundle());
-//let bundle = bundle.as_ref().map(|b| &*b.raw);
 // gccjit requires to use the result of functions, even when it's not used.
 // That's why we assign the result to a local.
 let return_type = self.context.new_type::<bool>();
 let current_block = self.current_block.borrow().expect("block");
 let current_func = current_block.get_function();
-// TODO: return the new_call() directly? Since the overflow function has no side-effects.
+// TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
 unsafe { RETURN_VALUE_COUNT += 1 };
 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
 current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
@@ -520,25 +434,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 self.llbb().end_with_conditional(None, condition, then, catch);
 self.context.new_rvalue_from_int(self.int_type, 0)
-// TODO
-/*debug!("invoke {:?} with args ({:?})", func, args);
-let args = self.check_call("invoke", func, args);
-let bundle = funclet.map(|funclet| funclet.bundle());
-let bundle = bundle.as_ref().map(|b| &*b.raw);
-unsafe {
-llvm::LLVMRustBuildInvoke(
-self.llbuilder,
-func,
-args.as_ptr(),
-args.len() as c_uint,
-then,
-catch,
-bundle,
-UNNAMED,
-)
-}*/
+// TODO(antoyo)
 }
 fn unreachable(&mut self) {
@@ -558,7 +454,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
-// FIXME: this should not be required.
+// FIXME(antoyo): this should not be required.
 if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
 b = self.context.new_cast(None, b, a.get_type());
 }
@@ -589,24 +485,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO: convert the arguments to unsigned?
+// TODO(antoyo): convert the arguments to unsigned?
 a / b
 }
 fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO: convert the arguments to unsigned?
-// TODO: poison if not exact.
+// TODO(antoyo): convert the arguments to unsigned?
+// TODO(antoyo): poison if not exact.
 a / b
 }
 fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO: convert the arguments to signed?
+// TODO(antoyo): convert the arguments to signed?
 a / b
 }
 fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO: posion if not exact.
-// FIXME: rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+// TODO(antoyo): posion if not exact.
+// FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
 // should be the same.
 let typ = a.get_type().to_signed(self);
 let a = self.context.new_cast(None, a, typ);
@@ -629,7 +525,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
 if a.get_type() == self.cx.float_type {
 let fmodf = self.context.get_builtin_function("fmodf");
-// FIXME: this seems to produce the wrong result.
+// FIXME(antoyo): this seems to produce the wrong result.
 return self.context.new_call(None, fmodf, &[a, b]);
 }
 assert_eq!(a.get_type(), self.cx.double_type);
@@ -639,18 +535,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
 let a_type = a.get_type();
 let b_type = b.get_type();
 if a_type.is_unsigned(self) && b_type.is_signed(self) {
-//println!("shl: {:?} -> {:?}", a, b_type);
 let a = self.context.new_cast(None, a, b_type);
 let result = a << b;
-//println!("shl: {:?} -> {:?}", result, a_type);
 self.context.new_cast(None, result, a_type)
 }
 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-//println!("shl: {:?} -> {:?}", b, a_type);
 let b = self.context.new_cast(None, b, a_type);
 a << b
 }
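shl() above, and lshr()/ashr() below, all apply the same cast dance the FIXME describes. A hedged standalone sketch of the idiom against the gccjit crate (`shl_demo` and the parameter names are illustrative): cast the unsigned operand to the signed operand's type, shift, then cast the result back.

    use gccjit::{Context, FunctionType, ToRValue};

    fn shl_demo() {
        let context = Context::default();
        let u32_type = context.new_type::<u32>();
        let i32_type = context.new_type::<i32>();
        let value = context.new_parameter(None, u32_type, "value");
        let amount = context.new_parameter(None, i32_type, "amount");
        let func = context.new_function(None, FunctionType::Exported, u32_type, &[value, amount], "shl_demo", false);
        let block = func.new_block("entry");
        // Cast, shift, cast back: libgccjit cannot yet shift an unsigned
        // number by an unsigned number, hence the round trip through i32.
        let casted = context.new_cast(None, value.to_rvalue(), i32_type);
        let shifted = casted << amount.to_rvalue();
        block.end_with_return(None, context.new_cast(None, shifted, u32_type));
    }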
@@ -660,19 +553,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
-// TODO: cast to unsigned to do a logical shift if that does not work.
+// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+// TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
 let a_type = a.get_type();
 let b_type = b.get_type();
 if a_type.is_unsigned(self) && b_type.is_signed(self) {
-//println!("lshl: {:?} -> {:?}", a, b_type);
 let a = self.context.new_cast(None, a, b_type);
 let result = a >> b;
-//println!("lshl: {:?} -> {:?}", result, a_type);
 self.context.new_cast(None, result, a_type)
 }
 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-//println!("lshl: {:?} -> {:?}", b, a_type);
 let b = self.context.new_cast(None, b, a_type);
 a >> b
 }
@@ -682,19 +572,16 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-// TODO: check whether behavior is an arithmetic shift for >> .
-// FIXME: remove the casts when libgccjit can shift an unsigned number by an unsigned number.
+// TODO(antoyo): check whether behavior is an arithmetic shift for >> .
+// FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
 let a_type = a.get_type();
 let b_type = b.get_type();
 if a_type.is_unsigned(self) && b_type.is_signed(self) {
-//println!("ashl: {:?} -> {:?}", a, b_type);
 let a = self.context.new_cast(None, a, b_type);
 let result = a >> b;
-//println!("ashl: {:?} -> {:?}", result, a_type);
 self.context.new_cast(None, result, a_type)
 }
 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-//println!("ashl: {:?} -> {:?}", b, a_type);
 let b = self.context.new_cast(None, b, a_type);
 a >> b
 }
@@ -704,7 +591,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
-// FIXME: hack by putting the result in a variable to workaround this bug:
+// FIXME(antoyo): hack by putting the result in a variable to workaround this bug:
 // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
 if a.get_type() != b.get_type() {
 b = self.context.new_cast(None, b, a.get_type());
@ -715,7 +602,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// FIXME: hack by putting the result in a variable to workaround this bug: // FIXME(antoyo): hack by putting the result in a variable to work around this bug:
// https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498 // https://gcc.gnu.org/bugzilla//show_bug.cgi?id=95498
let res = self.current_func().new_local(None, b.get_type(), "orResult"); let res = self.current_func().new_local(None, b.get_type(), "orResult");
self.llbb().add_assignment(None, res, a | b); self.llbb().add_assignment(None, res, a | b);
@ -727,7 +614,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> { fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
// TODO: use new_unary_op()? // TODO(antoyo): use new_unary_op()?
self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a self.cx.context.new_rvalue_from_long(a.get_type(), 0) - a
} }
@ -759,7 +646,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> { fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
// TODO: should generate poison value? // TODO(antoyo): should this generate a poison value?
a - b a - b
} }
@ -773,47 +660,22 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}*/
} }
fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}*/
} }
fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}*/
} }
fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}*/
} }
fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> { fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetHasUnsafeAlgebra(instr);
instr
}*/
} }
fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) { fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
@ -827,7 +689,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
_ => panic!("tried to get overflow intrinsic for op applied to non-int type"), _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
}; };
// TODO: remove duplication with intrinsic? // TODO(antoyo): remove duplication with intrinsic?
let name = let name =
match oop { match oop {
OverflowOp::Add => OverflowOp::Add =>
@ -882,7 +744,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let intrinsic = self.context.get_builtin_function(&name); let intrinsic = self.context.get_builtin_function(&name);
let res = self.current_func() let res = self.current_func()
// TODO: is it correct to use rhs type instead of the parameter typ? // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
.new_local(None, rhs.get_type(), "binopResult") .new_local(None, rhs.get_type(), "binopResult")
.get_address(None); .get_address(None);
let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None); let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
@ -890,7 +752,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
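// A sketch of one concrete instantiation of the scheme above, assuming the gccjit
// API used in this file: for a checked unsigned 32-bit addition, the GCC builtin
// __builtin_uadd_overflow writes the wrapped result through its pointer argument
// and returns the overflow flag. The helper name and parameters are hypothetical.
use gccjit::{Block, Context, Function, RValue, Type};

fn checked_uadd32<'gcc>(context: &'gcc Context<'gcc>, func: Function<'gcc>, block: Block<'gcc>, lhs: RValue<'gcc>, rhs: RValue<'gcc>, bool_type: Type<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
    let intrinsic = context.get_builtin_function("__builtin_uadd_overflow");
    let res = func.new_local(None, rhs.get_type(), "binopResult");
    let overflow = context.new_call(None, intrinsic, &[lhs, rhs, res.get_address(None)]);
    // Materialize the flag in the current block before handing both values back.
    let overflow_var = func.new_local(None, bool_type, "overflow");
    block.add_assignment(None, overflow_var, overflow);
    (res.to_rvalue(), overflow_var.to_rvalue())
}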
fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> { fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
// FIXME: this check that we don't call get_aligned() a second time on a time. // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
// Ideally, we shouldn't need to do this check. // Ideally, we shouldn't need to do this check.
let aligned_type = let aligned_type =
if ty == self.cx.u128_type || ty == self.cx.i128_type { if ty == self.cx.u128_type || ty == self.cx.i128_type {
@ -899,37 +761,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
else { else {
ty.get_aligned(align.bytes()) ty.get_aligned(align.bytes())
}; };
// TODO: It might be better to return a LValue, but fixing the rustc API is non-trivial. // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
self.stack_var_count.set(self.stack_var_count.get() + 1); self.stack_var_count.set(self.stack_var_count.get() + 1);
self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None) self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
} }
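// The allocation strategy above, reduced to its core and assuming the gccjit API
// used in this file: an alloca is just a uniquely named local of a type carrying
// the requested alignment, and the returned "pointer" is that local's address.
// The helper name and the external counter are hypothetical.
use gccjit::{Function, RValue, Type};

fn stack_slot<'gcc>(func: Function<'gcc>, ty: Type<'gcc>, align_bytes: u64, counter: &mut u64) -> RValue<'gcc> {
    let aligned_type = ty.get_aligned(align_bytes);
    *counter += 1;
    func.new_local(None, aligned_type, &format!("stack_var_{}", *counter))
        .get_address(None)
}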
fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> { fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}*/
} }
fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> { fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
alloca
}*/
} }
fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> { fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
// TODO: use ty. // TODO(antoyo): use ty.
let block = self.llbb(); let block = self.llbb();
let function = block.get_function(); let function = block.get_function();
// NOTE: instead of returning the dereference here, we have to assign it to a variable in // NOTE: instead of returning the dereference here, we have to assign it to a variable in
// the current basic block. Otherwise, it could be used in another basic block, causing a // the current basic block. Otherwise, it could be used in another basic block, causing a
// dereference after a drop, for instance. // dereference after a drop, for instance.
// TODO: handle align. // TODO(antoyo): handle align.
let deref = ptr.dereference(None).to_rvalue(); let deref = ptr.dereference(None).to_rvalue();
let value_type = deref.get_type(); let value_type = deref.get_type();
unsafe { RETURN_VALUE_COUNT += 1 }; unsafe { RETURN_VALUE_COUNT += 1 };
@ -939,16 +791,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
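// The NOTE above, as a standalone sketch (gccjit API as used in this file, names
// hypothetical): the dereference is copied into a fresh local of the *current*
// block, so a use in a later block reads the copy instead of re-dereferencing a
// pointer that may no longer be valid there.
use gccjit::{Block, RValue};

fn load_via_local<'gcc>(block: Block<'gcc>, ptr: RValue<'gcc>, counter: &mut u64) -> RValue<'gcc> {
    let deref = ptr.dereference(None).to_rvalue();
    *counter += 1;
    let loaded_value = block.get_function()
        .new_local(None, deref.get_type(), &format!("loadedValue{}", *counter));
    block.add_assignment(None, loaded_value, deref);
    loaded_value.to_rvalue()
}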
fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> { fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
// TODO: use ty. // TODO(antoyo): use ty.
//println!("5: volatile load: {:?} to {:?}", ptr, ptr.get_type().make_volatile());
let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile()); let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
//println!("6");
ptr.dereference(None).to_rvalue() ptr.dereference(None).to_rvalue()
} }
fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> { fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
// TODO: use ty. // TODO(antoyo): use ty.
// TODO: handle alignment. // TODO(antoyo): handle alignment.
let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes())); let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
@ -958,8 +808,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
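// A sketch of the lowering above, assuming the gccjit API used in this file: GCC
// exposes atomic loads as the __atomic_load_{1,2,4,8,16} builtins, which take the
// source pointer and the memory ordering as an int. Names are hypothetical.
use gccjit::{Context, RValue, Type};

fn atomic_load_sketch<'gcc>(context: &'gcc Context<'gcc>, ptr: RValue<'gcc>, size_bytes: u64, i32_type: Type<'gcc>, gcc_ordering: i32) -> RValue<'gcc> {
    let atomic_load = context.get_builtin_function(&format!("__atomic_load_{}", size_bytes));
    let ordering = context.new_rvalue_from_int(i32_type, gcc_ordering);
    context.new_call(None, atomic_load, &[ptr, ordering])
}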
fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> { fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
//debug!("PlaceRef::load: {:?}", place);
assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
if place.layout.is_zst() { if place.layout.is_zst() {
@ -987,22 +835,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
OperandValue::Ref(place.llval, Some(llextra), place.align) OperandValue::Ref(place.llval, Some(llextra), place.align)
} }
else if place.layout.is_gcc_immediate() { else if place.layout.is_gcc_immediate() {
let const_llval = None;
/*unsafe {
if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
if llvm::LLVMIsGlobalConstant(global) == llvm::True {
const_llval = llvm::LLVMGetInitializer(global);
}
}
}*/
let llval = const_llval.unwrap_or_else(|| {
let load = self.load(place.llval.get_type(), place.llval, place.align); let load = self.load(place.llval.get_type(), place.llval, place.align);
if let abi::Abi::Scalar(ref scalar) = place.layout.abi { if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
scalar_load_metadata(self, load, scalar); scalar_load_metadata(self, load, scalar);
} }
load OperandValue::Immediate(self.to_immediate(load, place.layout))
});
OperandValue::Immediate(self.to_immediate(llval, place.layout))
} }
else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
let b_offset = a.value.size(self).align_to(b.value.align(self).abi); let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
@ -1058,39 +895,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) { fn range_metadata(&mut self, _load: RValue<'gcc>, _range: Range<u128>) {
// TODO // TODO(antoyo)
/*if self.sess().target.target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks a i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
// tripping an assertion. So, for now, just disable this
// optimization.
return;
}
unsafe {
let llty = self.cx.val_ty(load);
let v = [
self.cx.const_uint_big(llty, range.start),
self.cx.const_uint_big(llty, range.end),
];
llvm::LLVMSetMetadata(
load,
llvm::MD_range as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
);
}*/
} }
fn nonnull_metadata(&mut self, _load: RValue<'gcc>) { fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
// TODO // TODO(antoyo)
/*unsafe {
llvm::LLVMSetMetadata(
load,
llvm::MD_nonnull as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
);
}*/
} }
fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> { fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
@ -1098,36 +907,21 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> { fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
//debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
let ptr = self.check_store(val, ptr); let ptr = self.check_store(val, ptr);
self.llbb().add_assignment(None, ptr.dereference(None), val); self.llbb().add_assignment(None, ptr.dereference(None), val);
/*let align = // TODO(antoyo): handle align and flags.
if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint }; // NOTE: dummy value here since it's never used. FIXME(antoyo): should the API return a value here at all?
llvm::LLVMSetAlignment(store, align);
if flags.contains(MemFlags::VOLATILE) {
llvm::LLVMSetVolatile(store, llvm::True);
}
if flags.contains(MemFlags::NONTEMPORAL) {
// According to LLVM [1] building a nontemporal store must
// *always* point to a metadata value of the integer 1.
//
// [1]: http://llvm.org/docs/LangRef.html#store-instruction
let one = self.cx.const_i32(1);
let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
}*/
// NOTE: dummy value here since it's never used. FIXME: API should not return a value here?
self.cx.context.new_rvalue_zero(self.type_i32()) self.cx.context.new_rvalue_zero(self.type_i32())
} }
fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) { fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
// TODO: handle alignment. // TODO(antoyo): handle alignment.
let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes())); let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc()); let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile(); let volatile_const_void_ptr_type = self.context.new_type::<*mut ()>().make_const().make_volatile();
let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type); let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
// FIXME: fix libgccjit to allow comparing an integer type with an aligned integer type because // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
// the following cast is required to avoid this error: // the following cast is required to avoid this error:
// gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4)))) // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
let int_type = atomic_store.get_param(1).to_rvalue().get_type(); let int_type = atomic_store.get_param(1).to_rvalue().get_type();
@ -1145,14 +939,14 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
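// The two casts above, in isolation (gccjit API as used in this file, names
// hypothetical): the destination pointer becomes a volatile const void pointer,
// and the value is coerced to the builtin's exact parameter type so that libgccjit
// does not reject an aligned-vs-plain integer mismatch.
use gccjit::{Block, Context, RValue};

fn atomic_store_sketch<'gcc>(context: &'gcc Context<'gcc>, block: Block<'gcc>, value: RValue<'gcc>, ptr: RValue<'gcc>, ordering: RValue<'gcc>, size_bytes: u64) {
    let atomic_store = context.get_builtin_function(&format!("__atomic_store_{}", size_bytes));
    let volatile_const_void_ptr_type = context.new_type::<*mut ()>().make_const().make_volatile();
    let ptr = context.new_cast(None, ptr, volatile_const_void_ptr_type);
    let int_type = atomic_store.get_param(1).to_rvalue().get_type();
    let value = context.new_cast(None, value, int_type);
    block.add_eval(None, context.new_call(None, atomic_store, &[ptr, value, ordering]));
}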
fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> { fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
// FIXME: would be safer if doing the same thing (loop) as gep. // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep does.
// TODO: specify inbounds somehow. // TODO(antoyo): specify inbounds somehow.
match indices.len() { match indices.len() {
1 => { 1 => {
self.context.new_array_access(None, ptr, indices[0]).get_address(None) self.context.new_array_access(None, ptr, indices[0]).get_address(None)
}, },
2 => { 2 => {
let array = ptr.dereference(None); // TODO: assert that first index is 0? let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
self.context.new_array_access(None, array, indices[1]).get_address(None) self.context.new_array_access(None, array, indices[1]).get_address(None)
}, },
_ => unimplemented!(), _ => unimplemented!(),
@ -1160,7 +954,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
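// The index handling above as a standalone sketch (gccjit API as in this file,
// name hypothetical): a single index is a direct array access on the pointer,
// while two indices first dereference to the pointed-to array and index into that.
use gccjit::{Context, RValue};

fn gep_sketch<'gcc>(context: &'gcc Context<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
    match indices.len() {
        1 => context.new_array_access(None, ptr, indices[0]).get_address(None),
        2 => {
            // Assumes the first index is 0, as the code above does.
            let array = ptr.dereference(None);
            context.new_array_access(None, array, indices[1]).get_address(None)
        },
        _ => unimplemented!(),
    }
}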
fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> { fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
// FIXME: it would be better if the API only called this on struct, not on arrays. // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
assert_eq!(idx as usize as u64, idx); assert_eq!(idx as usize as u64, idx);
let value = ptr.dereference(None).to_rvalue(); let value = ptr.dereference(None).to_rvalue();
@ -1186,31 +980,21 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
/* Casts */ /* Casts */
fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO: check that it indeed truncate the value. // TODO(antoyo): check that it indeed truncates the value.
//println!("trunc: {:?} -> {:?}", value, dest_ty);
self.context.new_cast(None, value, dest_ty) self.context.new_cast(None, value, dest_ty)
} }
fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO: check that it indeed sign extend the value. // TODO(antoyo): check that it indeed sign-extends the value.
//println!("Sext {:?} to {:?}", value, dest_ty);
//if let Some(vector_type) = value.get_type().is_vector() {
if dest_ty.is_vector().is_some() { if dest_ty.is_vector().is_some() {
// TODO: nothing to do as it is only for LLVM? // TODO(antoyo): nothing to do as it is only for LLVM?
return value; return value;
/*let dest_type = self.context.new_vector_type(dest_ty, vector_type.get_num_units() as u64);
println!("Casting {:?} to {:?}", value, dest_type);
return self.context.new_cast(None, value, dest_type);*/
} }
self.context.new_cast(None, value, dest_ty) self.context.new_cast(None, value, dest_ty)
} }
fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
//println!("7: fptoui: {:?} to {:?}", value, dest_ty); self.context.new_cast(None, value, dest_ty)
let ret = self.context.new_cast(None, value, dest_ty);
//println!("8");
ret
//unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
} }
fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@ -1218,21 +1002,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
//println!("1: uitofp: {:?} -> {:?}", value, dest_ty); self.context.new_cast(None, value, dest_ty)
let ret = self.context.new_cast(None, value, dest_ty);
//println!("2");
ret
} }
fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
//println!("3: sitofp: {:?} -> {:?}", value, dest_ty); self.context.new_cast(None, value, dest_ty)
let ret = self.context.new_cast(None, value, dest_ty);
//println!("4");
ret
} }
fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO: make sure it trancates. // TODO(antoyo): make sure it truncates.
self.context.new_cast(None, value, dest_ty) self.context.new_cast(None, value, dest_ty)
} }
@ -1254,12 +1032,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> { fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
// NOTE: is_signed is for value, not dest_typ. // NOTE: is_signed is for value, not dest_typ.
//println!("intcast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_typ);
self.cx.context.new_cast(None, value, dest_typ) self.cx.context.new_cast(None, value, dest_typ)
} }
fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
//println!("pointercast: {:?} ({:?}) -> {:?}", value, value.get_type(), dest_ty);
let val_type = value.get_type(); let val_type = value.get_type();
match (type_is_pointer(val_type), type_is_pointer(dest_ty)) { match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
(false, true) => { (false, true) => {
@ -1269,7 +1045,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
}, },
(false, false) => { (false, false) => {
// When they are not pointers, we want a transmute (or reinterpret_cast). // When they are not pointers, we want a transmute (or reinterpret_cast).
//self.cx.context.new_cast(None, value, dest_ty)
self.bitcast(value, dest_ty) self.bitcast(value, dest_ty)
}, },
(true, true) => self.cx.context.new_cast(None, value, dest_ty), (true, true) => self.cx.context.new_cast(None, value, dest_ty),
@ -1307,7 +1082,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let src = self.pointercast(src, self.type_ptr_to(self.type_void())); let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
let memcpy = self.context.get_builtin_function("memcpy"); let memcpy = self.context.get_builtin_function("memcpy");
let block = self.block.expect("block"); let block = self.block.expect("block");
// TODO: handle aligns and is_volatile. // TODO(antoyo): handle aligns and is_volatile.
block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size])); block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
} }
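// The builtin-call pattern above, reduced to a sketch (gccjit API as in this file,
// names hypothetical): fetch the libc-style builtin, cast both pointers to a void
// pointer type, and evaluate the call in the block purely for its side effect.
use gccjit::{Block, Context, RValue, Type};

fn emit_memcpy<'gcc>(context: &'gcc Context<'gcc>, block: Block<'gcc>, dst: RValue<'gcc>, src: RValue<'gcc>, size: RValue<'gcc>, void_ptr_type: Type<'gcc>) {
    let memcpy = context.get_builtin_function("memcpy");
    let dst = context.new_cast(None, dst, void_ptr_type);
    let src = context.new_cast(None, src, void_ptr_type);
    block.add_eval(None, context.new_call(None, memcpy, &[dst, src, size]));
}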
@ -1326,7 +1101,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let memmove = self.context.get_builtin_function("memmove"); let memmove = self.context.get_builtin_function("memmove");
let block = self.block.expect("block"); let block = self.block.expect("block");
// TODO: handle is_volatile. // TODO(antoyo): handle is_volatile.
block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size])); block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
} }
@ -1335,8 +1110,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let ptr = self.pointercast(ptr, self.type_i8p()); let ptr = self.pointercast(ptr, self.type_i8p());
let memset = self.context.get_builtin_function("memset"); let memset = self.context.get_builtin_function("memset");
let block = self.block.expect("block"); let block = self.block.expect("block");
// TODO: handle aligns and is_volatile. // TODO(antoyo): handle align and is_volatile.
//println!("memset: {:?} -> {:?}", fill_byte, self.i32_type);
let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type); let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
let size = self.intcast(size, self.type_size_t(), false); let size = self.intcast(size, self.type_size_t(), false);
block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size])); block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
@ -1370,27 +1144,18 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
#[allow(dead_code)] #[allow(dead_code)]
fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> { fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
//unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
} }
fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> { fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
//unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
} }
fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> { fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*unsafe {
let elt_ty = self.cx.val_ty(elt);
let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
}*/
} }
fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
// FIXME: it would be better if the API only called this on struct, not on arrays. // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
assert_eq!(idx as usize as u64, idx); assert_eq!(idx as usize as u64, idx);
let value_type = aggregate_value.get_type(); let value_type = aggregate_value.get_type();
@ -1418,12 +1183,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
else { else {
panic!("Unexpected type {:?}", value_type); panic!("Unexpected type {:?}", value_type);
} }
/*assert_eq!(idx as c_uint as u64, idx);
unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }*/
} }
fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> { fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
// FIXME: it would be better if the API only called this on struct, not on arrays. // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
assert_eq!(idx as usize as u64, idx); assert_eq!(idx as usize as u64, idx);
let value_type = aggregate_value.get_type(); let value_type = aggregate_value.get_type();
@ -1459,88 +1222,41 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]); let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
self.current_func().new_local(None, struct_type.as_type(), "landing_pad") self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
.to_rvalue() .to_rvalue()
// TODO // TODO(antoyo): Properly implement unwinding.
/*unsafe { // The code above only exists to make compilation work: it seems
llvm::LLVMBuildLandingPad(self.llbuilder, ty, pers_fn, num_clauses as c_uint, UNNAMED) // rustc_codegen_ssa now calls the unwinding builder methods even with panic=abort.
}*/
} }
fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) { fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
// TODO // TODO(antoyo)
/*unsafe {
llvm::LLVMSetCleanup(landing_pad, llvm::True);
}*/
} }
fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> { fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
//unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) }
} }
fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet { fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
unimplemented!(); unimplemented!();
/*let name = const_cstr!("cleanuppad");
let ret = unsafe {
llvm::LLVMRustBuildCleanupPad(
self.llbuilder,
parent,
args.len() as c_uint,
args.as_ptr(),
name.as_ptr(),
)
};
Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))*/
} }
fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> { fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*let ret =
unsafe { llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind) };
ret.expect("LLVM does not have support for cleanupret")*/
} }
fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet { fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
unimplemented!(); unimplemented!();
/*let name = const_cstr!("catchpad");
let ret = unsafe {
llvm::LLVMRustBuildCatchPad(
self.llbuilder,
parent,
args.len() as c_uint,
args.as_ptr(),
name.as_ptr(),
)
};
Funclet::new(ret.expect("LLVM does not have support for catchpad"))*/
} }
fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> { fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*let name = const_cstr!("catchswitch");
let ret = unsafe {
llvm::LLVMRustBuildCatchSwitch(
self.llbuilder,
parent,
unwind,
num_handlers as c_uint,
name.as_ptr(),
)
};
ret.expect("LLVM does not have support for catchswitch")*/
} }
fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) { fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
unimplemented!(); unimplemented!();
/*unsafe {
llvm::LLVMRustAddHandler(catch_switch, handler);
}*/
} }
fn set_personality_fn(&mut self, _personality: RValue<'gcc>) { fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
// TODO // TODO(antoyo)
/*unsafe {
llvm::LLVMSetPersonalityFn(self.llfn(), personality);
}*/
} }
// Atomic Operations // Atomic Operations
@ -1551,7 +1267,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false); let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result"); let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
let align = Align::from_bits(64).expect("align"); // TODO: use good align. let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use the proper alignment.
let value_type = result.to_rvalue().get_type(); let value_type = result.to_rvalue().get_type();
if let Some(struct_type) = value_type.is_struct() { if let Some(struct_type) = value_type.is_struct() {
@ -1560,7 +1276,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
// expected so that we store expected after the call. // expected so that we store expected after the call.
self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align); self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
} }
// TODO: handle when value is not a struct. // TODO(antoyo): handle when value is not a struct.
result.to_rvalue() result.to_rvalue()
} }
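// A sketch of the result plumbing above (gccjit API as in this file, field and
// helper names hypothetical): the (value, success) pair lives in a local struct,
// and `expected` is written into the value field so the caller can read back the
// value witnessed by the compare-exchange.
use gccjit::{Block, Context, Function, RValue, Type};

fn cmpxchg_result_slot<'gcc>(context: &'gcc Context<'gcc>, func: Function<'gcc>, block: Block<'gcc>, value_type: Type<'gcc>, bool_type: Type<'gcc>, expected: RValue<'gcc>) -> RValue<'gcc> {
    let value_field = context.new_field(None, value_type, "value");
    let success_field = context.new_field(None, bool_type, "success");
    let pair_type = context.new_struct_type(None, "atomic_cmpxchg_result", &[value_field, success_field]);
    let result = func.new_local(None, pair_type.as_type(), "atomic_cmpxchg_result");
    block.add_assignment(None, result.access_field(None, value_field), expected);
    result.to_rvalue()
}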
@ -1589,7 +1305,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
let void_ptr_type = self.context.new_type::<*mut ()>(); let void_ptr_type = self.context.new_type::<*mut ()>();
let volatile_void_ptr_type = void_ptr_type.make_volatile(); let volatile_void_ptr_type = void_ptr_type.make_volatile();
let dst = self.context.new_cast(None, dst, volatile_void_ptr_type); let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
// NOTE: not sure why, but we have the wrong type here. // FIXME(antoyo): not sure why, but we have the wrong type here.
let new_src_type = atomic_function.get_param(1).to_rvalue().get_type(); let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
let src = self.context.new_cast(None, src, new_src_type); let src = self.context.new_cast(None, src, new_src_type);
let res = self.context.new_call(None, atomic_function, &[dst, src, order]); let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
@ -1610,28 +1326,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn set_invariant_load(&mut self, load: RValue<'gcc>) { fn set_invariant_load(&mut self, load: RValue<'gcc>) {
// NOTE: Hack to consider a vtable function pointer as a non-global-variable function pointer. // NOTE: Hack to consider a vtable function pointer as a non-global-variable function pointer.
self.normal_function_addresses.borrow_mut().insert(load); self.normal_function_addresses.borrow_mut().insert(load);
// TODO // TODO(antoyo)
/*unsafe {
llvm::LLVMSetMetadata(
load,
llvm::MD_invariant_load as c_uint,
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
);
}*/
} }
fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) { fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
// TODO // TODO(antoyo)
//self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
} }
fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) { fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
// TODO // TODO(antoyo)
//self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
} }
fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> { fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
// FIXME: remove when having a proper API. // FIXME(antoyo): remove when having a proper API.
let gcc_func = unsafe { std::mem::transmute(func) }; let gcc_func = unsafe { std::mem::transmute(func) };
if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() { if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
self.function_call(func, args, funclet) self.function_call(func, args, funclet)
@ -1643,13 +1350,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> { fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
// FIXME: this does not zero-extend. // FIXME(antoyo): this does not zero-extend.
if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) { if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
// FIXME: hack because base::from_immediate converts i1 to i8. // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
// Fix the code in codegen_ssa::base::from_immediate. // Fix the code in codegen_ssa::base::from_immediate.
return value; return value;
} }
//println!("zext: {:?} -> {:?}", value, dest_typ);
self.context.new_cast(None, value, dest_typ) self.context.new_cast(None, value, dest_typ)
} }
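// The special case above, isolated (gccjit API as in this file, hypothetical
// names): gccjit's bool already occupies a byte, so when codegen_ssa asks to widen
// an i1 into an i8 the value can pass through unchanged; every other widening is
// an ordinary cast (which, as the FIXME notes, does not yet guarantee zeroing).
use gccjit::{Context, RValue, Type};

fn zext_sketch<'gcc>(context: &'gcc Context<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>, bool_type: Type<'gcc>, i8_type: Type<'gcc>) -> RValue<'gcc> {
    if value.get_type() == bool_type && dest_ty == i8_type {
        return value;
    }
    context.new_cast(None, value, dest_ty)
}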
@ -1659,7 +1365,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn do_not_inline(&mut self, _llret: RValue<'gcc>) { fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
unimplemented!(); unimplemented!();
//llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
} }
fn set_span(&mut self, _span: Span) {} fn set_span(&mut self, _span: Span) {}
@ -1690,24 +1395,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) { fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
unimplemented!(); unimplemented!();
/*debug!(
"instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
fn_name, hash, num_counters, index
);
let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
let args = &[fn_name, hash, num_counters, index];
let args = self.check_call("call", llfn, args);
unsafe {
let _ = llvm::LLVMRustBuildCall(
self.llbuilder,
llfn,
args.as_ptr() as *const &llvm::Value,
args.len() as c_uint,
None,
);
}*/
} }
} }
@ -1766,7 +1453,7 @@ impl ToGccComp for IntPredicate {
impl ToGccComp for RealPredicate { impl ToGccComp for RealPredicate {
fn to_gcc_comparison(&self) -> ComparisonOp { fn to_gcc_comparison(&self) -> ComparisonOp {
// TODO: check that ordered vs non-ordered is respected. // TODO(antoyo): check that ordered vs non-ordered is respected.
match *self { match *self {
RealPredicate::RealPredicateFalse => unreachable!(), RealPredicate::RealPredicateFalse => unreachable!(),
RealPredicate::RealOEQ => ComparisonOp::Equals, RealPredicate::RealOEQ => ComparisonOp::Equals,
@ -1809,9 +1496,9 @@ impl ToGccOrdering for AtomicOrdering {
let ordering = let ordering =
match self { match self {
AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO: check if that's the same. AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Unordered => __ATOMIC_RELAXED, AtomicOrdering::Unordered => __ATOMIC_RELAXED,
AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO: check if that's the same. AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
AtomicOrdering::Acquire => __ATOMIC_ACQUIRE, AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
AtomicOrdering::Release => __ATOMIC_RELEASE, AtomicOrdering::Release => __ATOMIC_RELEASE,
AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL, AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,

View File

@ -17,8 +17,6 @@ use crate::context::CodegenCx;
pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> { pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
let tcx = cx.tcx(); let tcx = cx.tcx();
//debug!("get_fn(instance={:?})", instance);
assert!(!instance.substs.needs_infer()); assert!(!instance.substs.needs_infer());
assert!(!instance.substs.has_escaping_bound_vars()); assert!(!instance.substs.has_escaping_bound_vars());
assert!(!instance.substs.has_param_types_or_consts()); assert!(!instance.substs.has_param_types_or_consts());
@ -28,11 +26,9 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
} }
let sym = tcx.symbol_name(instance).name; let sym = tcx.symbol_name(instance).name;
//debug!("get_fn({:?}: {:?}) => {}", instance, instance.monomorphic_ty(cx.tcx()), sym);
let fn_abi = FnAbi::of_instance(cx, instance, &[]); let fn_abi = FnAbi::of_instance(cx, instance, &[]);
// TODO
let func = let func =
if let Some(func) = cx.get_declared_value(&sym) { if let Some(func) = cx.get_declared_value(&sym) {
// Create a fn pointer with the new signature. // Create a fn pointer with the new signature.
@ -62,34 +58,18 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
// reference. It also occurs when testing libcore and in some // reference. It also occurs when testing libcore and in some
// other weird situations. Annoying. // other weird situations. Annoying.
if cx.val_ty(func) != ptrty { if cx.val_ty(func) != ptrty {
//debug!("get_fn: casting {:?} to {:?}", func, ptrty); // TODO(antoyo): cast the pointer.
// TODO
//cx.const_ptrcast(func, ptrty)
func func
} }
else { else {
//debug!("get_fn: not casting pointer!");
func func
} }
} }
else { else {
cx.linkage.set(FunctionType::Extern); cx.linkage.set(FunctionType::Extern);
let func = cx.declare_fn(&sym, &fn_abi); let func = cx.declare_fn(&sym, &fn_abi);
//cx.linkage.set(FunctionType::Internal);
//debug!("get_fn: not casting pointer!");
// TODO
//attributes::from_fn_attrs(cx, func, instance);
//let instance_def_id = instance.def_id();
// TODO
/*if cx.use_dll_storage_attrs && tcx.is_dllimport_foreign_item(instance_def_id) {
unsafe {
llvm::LLVMSetDLLStorageClass(func, llvm::DLLStorageClass::DllImport);
}
}*/
// TODO(antoyo): set linkage and attributes.
func func
}; };

View File

@ -27,7 +27,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> { fn const_cstr(&self, symbol: Symbol, _null_terminated: bool) -> RValue<'gcc> {
// TODO: handle null_terminated. // TODO(antoyo): handle null_terminated.
if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) { if let Some(&value) = self.const_cstr_cache.borrow().get(&symbol) {
return value.to_rvalue(); return value.to_rvalue();
} }
@ -39,7 +39,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
fn global_string(&self, string: &str) -> RValue<'gcc> { fn global_string(&self, string: &str) -> RValue<'gcc> {
// TODO: handle non-null-terminated strings. // TODO(antoyo): handle non-null-terminated strings.
let string = self.context.new_string_literal(&*string); let string = self.context.new_string_literal(&*string);
let sym = self.generate_local_symbol_name("str"); let sym = self.generate_local_symbol_name("str");
// NOTE: TLS is always off for a string literal. // NOTE: TLS is always off for a string literal.
@ -48,7 +48,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
.unwrap_or_else(|| bug!("symbol `{}` is already defined", sym)); .unwrap_or_else(|| bug!("symbol `{}` is already defined", sym));
self.global_init_block.add_assignment(None, global.dereference(None), string); self.global_init_block.add_assignment(None, global.dereference(None), string);
global.to_rvalue() global.to_rvalue()
//llvm::LLVMRustSetLinkage(global, llvm::Linkage::InternalLinkage); // TODO(antoyo): set linkage.
} }
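// The string-literal strategy above as a sketch (gccjit API as in this file; the
// helper name, GlobalKind choice, and direct assignment are assumptions): the
// literal becomes an internal global of the right array type, initialized by an
// assignment in a dedicated init block because libgccjit does not yet offer a
// usable set_initializer for this case.
use gccjit::{Block, Context, GlobalKind, RValue, Type};

fn global_string_sketch<'gcc>(context: &'gcc Context<'gcc>, init_block: Block<'gcc>, array_type: Type<'gcc>, name: &str, s: &str) -> RValue<'gcc> {
    let literal = context.new_string_literal(s);
    let global = context.new_global(None, GlobalKind::Internal, array_type, name);
    init_block.add_assignment(None, global, literal);
    global.get_address(None)
}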
pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { pub fn inttoptr(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@ -62,7 +62,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { pub fn ptrtoint(&self, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
// TODO: when libgccjit allow casting from pointer to int, remove this. // TODO(antoyo): when libgccjit allows casting from pointer to int, remove this.
let func = block.get_function(); let func = block.get_function();
let local = func.new_local(None, value.get_type(), "ptrLocal"); let local = func.new_local(None, value.get_type(), "ptrLocal");
block.add_assignment(None, local, value); block.add_assignment(None, local, value);
@ -71,10 +71,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer()); let ptr = self.context.new_cast(None, ptr_address, dest_ty.make_pointer());
ptr.dereference(None).to_rvalue() ptr.dereference(None).to_rvalue()
} }
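// The workaround above in isolation (gccjit API as in this file, hypothetical
// name): libgccjit cannot yet cast a pointer to an integer, so the pointer is
// spilled to a local, the local's address is reinterpreted as a pointer to the
// destination integer type, and a load through it yields the integer bits.
use gccjit::{Block, Context, RValue, Type};

fn ptr_to_int_sketch<'gcc>(context: &'gcc Context<'gcc>, block: Block<'gcc>, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
    let func = block.get_function();
    let local = func.new_local(None, value.get_type(), "ptrLocal");
    block.add_assignment(None, local, value);
    let ptr = context.new_cast(None, local.get_address(None), dest_ty.make_pointer());
    ptr.dereference(None).to_rvalue()
}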
/*pub fn const_vector(&self, elements: &[RValue<'gcc>]) -> RValue<'gcc> {
self.context.new_rvalue_from_vector(None, elements[0].get_type(), elements)
}*/
} }
pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> { pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
@ -125,13 +121,13 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> { fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
let num64: Result<i64, _> = num.try_into(); let num64: Result<i64, _> = num.try_into();
if let Ok(num) = num64 { if let Ok(num) = num64 {
// FIXME: workaround for a bug where libgccjit is expecting a constant. // FIXME(antoyo): workaround for a bug where libgccjit is expecting a constant.
// The operations >> 64 and | low make the normal case non-constant. // The operations >> 64 and | low make the normal case non-constant.
return self.context.new_rvalue_from_long(typ, num as i64); return self.context.new_rvalue_from_long(typ, num as i64);
} }
if num >> 64 != 0 { if num >> 64 != 0 {
// FIXME: use a new function new_rvalue_from_unsigned_long()? // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64); let low = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64); let high = self.context.new_rvalue_from_long(typ, (num >> 64) as u64 as i64);
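// The 128-bit split above, checked in plain Rust: the constant is rebuilt from its
// two u64 halves, which is exactly what the emitted `<< 64` / `|` combination
// computes on the gccjit side. The helper name is hypothetical.
fn split_u128(num: u128) -> (u64, u64) {
    (num as u64, (num >> 64) as u64)
}
// let (low, high) = split_u128(0x1_0000_0000_0000_002a);
// assert_eq!(((high as u128) << 64) | low as u128, 0x1_0000_0000_0000_002a);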
@ -175,12 +171,10 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn const_u8(&self, _i: u8) -> RValue<'gcc> { fn const_u8(&self, _i: u8) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
//self.const_uint(self.type_i8(), i as u64)
} }
fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> { fn const_real(&self, _t: Type<'gcc>, _val: f64) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
//unsafe { llvm::LLVMConstReal(t, val) }
} }
fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) { fn const_str(&self, s: Symbol) -> (RValue<'gcc>, RValue<'gcc>) {
@ -195,7 +189,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let fields: Vec<_> = values.iter() let fields: Vec<_> = values.iter()
.map(|value| value.get_type()) .map(|value| value.get_type())
.collect(); .collect();
// TODO: cache the type? It's anonymous, so probably not. // TODO(antoyo): cache the type? It's anonymous, so probably not.
let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_"); let name = fields.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
let typ = self.type_struct(&fields, packed); let typ = self.type_struct(&fields, packed);
let structure = self.global_init_func.new_local(None, typ, &name); let structure = self.global_init_func.new_local(None, typ, &name);
@ -209,19 +203,13 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
} }
fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> { fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
// TODO // TODO(antoyo)
None None
//try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
} }
fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> { fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
// TODO // TODO(antoyo)
None None
/*try_as_const_integral(v).and_then(|v| unsafe {
let (mut lo, mut hi) = (0u64, 0u64);
let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
success.then_some(hi_lo_to_u128(lo, hi))
})*/
} }
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> { fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
@ -234,7 +222,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
Scalar::Int(int) => { Scalar::Int(int) => {
let data = int.assert_bits(layout.value.size(self)); let data = int.assert_bits(layout.value.size(self));
// FIXME: there's some issues with using the u128 code that follows, so hard-code // FIXME(antoyo): there are some issues with using the u128 code that follows, so hard-code
// the paths for floating-point values. // the paths for floating-point values.
if ty == self.float_type { if ty == self.float_type {
return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64); return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
@ -262,8 +250,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
_ => self.static_addr_of(init, alloc.align, None), _ => self.static_addr_of(init, alloc.align, None),
}; };
if !self.sess().fewer_names() { if !self.sess().fewer_names() {
// TODO // TODO(antoyo): set value name.
//llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
} }
value value
}, },

View File

@ -32,22 +32,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> { fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
if let Some(global_value) = self.const_globals.borrow().get(&cv) { if let Some(global_value) = self.const_globals.borrow().get(&cv) {
// TODO // TODO(antoyo): upgrade alignment.
/*unsafe {
// Upgrade the alignment in cases where the same constant is used with different
// alignment requirements
let llalign = align.bytes() as u32;
if llalign > llvm::LLVMGetAlignment(gv) {
llvm::LLVMSetAlignment(gv, llalign);
}
}*/
return *global_value; return *global_value;
} }
let global_value = self.static_addr_of_mut(cv, align, kind); let global_value = self.static_addr_of_mut(cv, align, kind);
// TODO // TODO(antoyo): set global constant.
/*unsafe {
llvm::LLVMSetGlobalConstant(global_value, True);
}*/
self.const_globals.borrow_mut().insert(cv, global_value); self.const_globals.borrow_mut().insert(cv, global_value);
global_value global_value
} }
@ -73,9 +62,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
let val_llty = self.val_ty(value); let val_llty = self.val_ty(value);
let value = let value =
if val_llty == self.type_i1() { if val_llty == self.type_i1() {
//val_llty = self.type_i8();
unimplemented!(); unimplemented!();
//llvm::LLVMConstZExt(value, val_llty)
} }
else { else {
value value
@ -92,26 +79,17 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
else { else {
// If we created the global with the wrong type, // If we created the global with the wrong type,
// correct the type. // correct the type.
/*let name = llvm::get_value_name(global).to_vec(); // TODO(antoyo): set value name, linkage and visibility.
llvm::set_value_name(global, b"");
let linkage = llvm::LLVMRustGetLinkage(global);
let visibility = llvm::LLVMRustGetVisibility(global);*/
let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section); let new_global = self.get_or_insert_global(&name, val_llty, is_tls, attrs.link_section);
/*llvm::LLVMRustSetLinkage(new_global, linkage);
llvm::LLVMRustSetVisibility(new_global, visibility);*/
// To avoid breaking any invariants, we leave around the old // To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it // global for the moment; we'll replace all references to it
// with the new global later. (See base::codegen_backend.) // with the new global later. (See base::codegen_backend.)
//self.statics_to_rauw.borrow_mut().push((global, new_global)); //self.statics_to_rauw.borrow_mut().push((global, new_global));
new_global new_global
}; };
// TODO // TODO(antoyo): set alignment and initializer.
//set_global_alignment(&self, global, self.align_of(ty));
//llvm::LLVMSetInitializer(global, value);
let value = self.rvalue_as_lvalue(value); let value = self.rvalue_as_lvalue(value);
let value = value.get_address(None); let value = value.get_address(None);
let dest_typ = global.get_type(); let dest_typ = global.get_type();
@ -119,14 +97,14 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
// NOTE: do not init the variables related to argc/argv because it seems we cannot // NOTE: do not init the variables related to argc/argv because it seems we cannot
// overwrite those variables. // overwrite those variables.
// FIXME: correctly support global variable initialization. // FIXME(antoyo): correctly support global variable initialization.
let skip_init = [ let skip_init = [
ARGV_INIT_ARRAY, ARGV_INIT_ARRAY,
ARGC, ARGC,
ARGV, ARGV,
]; ];
if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) { if !skip_init.iter().any(|symbol_name| name.starts_with(symbol_name)) {
// TODO: switch to set_initializer when libgccjit supports that. // TODO(antoyo): switch to set_initializer when libgccjit supports that.
let memcpy = self.context.get_builtin_function("memcpy"); let memcpy = self.context.get_builtin_function("memcpy");
let dst = self.context.new_cast(None, global, self.type_i8p()); let dst = self.context.new_cast(None, global, self.type_i8p());
let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void())); let src = self.context.new_cast(None, value, self.type_ptr_to(self.type_void()));
@ -138,13 +116,10 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
// mutability are placed into read-only memory. // mutability are placed into read-only memory.
if !is_mutable { if !is_mutable {
if self.type_is_freeze(ty) { if self.type_is_freeze(ty) {
// TODO // TODO(antoyo): set global constant.
//llvm::LLVMSetGlobalConstant(global, llvm::True);
} }
} }
//debuginfo::create_global_var_metadata(&self, def_id, global);
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
// Do not allow LLVM to change the alignment of a TLS on macOS. // Do not allow LLVM to change the alignment of a TLS on macOS.
// //
@ -184,19 +159,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
// happens to be zero. Instead, we should only check the value of defined bytes // happens to be zero. Instead, we should only check the value of defined bytes
// and set all undefined bytes to zero if this allocation is headed for the // and set all undefined bytes to zero if this allocation is headed for the
// BSS. // BSS.
/*let all_bytes_are_zero = alloc.relocations().is_empty()
&& alloc
.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
.iter()
.all(|&byte| byte == 0);
let sect_name = if all_bytes_are_zero {
CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
} else {
CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
};*/
unimplemented!(); unimplemented!();
//llvm::LLVMSetSection(global, sect_name.as_ptr());
} }
} }
@ -205,34 +168,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
if let Some(_section) = attrs.link_section { if let Some(_section) = attrs.link_section {
unimplemented!(); unimplemented!();
/*let section = llvm::LLVMMDStringInContext(
self.llcx,
section.as_str().as_ptr().cast(),
section.as_str().len() as c_uint,
);
assert!(alloc.relocations().is_empty());
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state (not
// as part of the interpreter execution).
let bytes =
alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
let alloc = llvm::LLVMMDStringInContext(
self.llcx,
bytes.as_ptr().cast(),
bytes.len() as c_uint,
);
let data = [section, alloc];
let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
llvm::LLVMAddNamedMetadataOperand(
self.llmod,
"wasm.custom_sections\0".as_ptr().cast(),
meta,
);*/
} }
} else { } else {
// TODO // TODO(antoyo): set link section.
//base::set_link_section(global, &attrs);
} }
if attrs.flags.contains(CodegenFnAttrFlags::USED) { if attrs.flags.contains(CodegenFnAttrFlags::USED) {
@ -242,9 +180,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
/// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*. /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
fn add_used_global(&self, _global: RValue<'gcc>) { fn add_used_global(&self, _global: RValue<'gcc>) {
// TODO // TODO(antoyo)
//let cast = self.context.new_cast(None, global, self.type_i8p());
//self.used_statics.borrow_mut().push(cast);
} }
} }
@ -254,13 +190,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
match kind { match kind {
Some(kind) if !self.tcx.sess.fewer_names() => { Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind); let name = self.generate_local_symbol_name(kind);
// TODO: check if it's okay that TLS is off here. // TODO(antoyo): check if it's okay that TLS is off here.
// TODO: check if it's okay that link_section is None here. // TODO(antoyo): check if it's okay that link_section is None here.
// TODO: set alignment here as well. // TODO(antoyo): set alignment here as well.
let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| { let gv = self.define_global(&name[..], self.val_ty(cv), false, None).unwrap_or_else(|| {
bug!("symbol `{}` is already defined", name); bug!("symbol `{}` is already defined", name);
}); });
//llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); // TODO(antoyo): set linkage.
(name, gv) (name, gv)
} }
_ => { _ => {
@ -271,13 +207,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
(name, global) (name, global)
}, },
}; };
// FIXME: I think the name coming from generate_local_symbol_name() above cannot be used // FIXME(antoyo): I think the name coming from generate_local_symbol_name() above cannot be used
// globally. // globally.
            // NOTE: a global only seems to be visible within a single module, so save the name instead of the value             // NOTE: a global only seems to be visible within a single module, so save the name instead of the value
// to import it later. // to import it later.
self.global_names.borrow_mut().insert(cv, name); self.global_names.borrow_mut().insert(cv, name);
self.global_init_block.add_assignment(None, gv.dereference(None), cv); self.global_init_block.add_assignment(None, gv.dereference(None), cv);
//llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global); // TODO(antoyo): set unnamed address.
gv gv
} }
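A hypothetical sketch of the two workarounds described above, against the gccjit crate's API as used elsewhere in this diff (the names init_globals and my_global are illustrative): globals are remembered by name because they are only visible within one context, and they are initialized by assignment in a dedicated init function rather than with a real initializer.

use gccjit::{Context, FunctionType, GlobalKind};

fn main() {
    let context = Context::default();
    let int_type = context.new_type::<i32>();
    let void_type = context.new_type::<()>();

    // Stand-in for global_init_block: a function meant to run before user code.
    let init_func = context.new_function(None, FunctionType::Exported, void_type, &[], "init_globals", false);
    let init_block = init_func.new_block("entry");

    // Another context would redeclare this global with GlobalKind::Imported
    // under the same saved name.
    let global = context.new_global(None, GlobalKind::Exported, int_type, "my_global");

    // Initialize by assignment instead of a static initializer.
    init_block.add_assignment(None, global, context.new_rvalue_from_int(int_type, 42));
    init_block.end_with_void_return(None);
}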
@ -285,19 +221,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let instance = Instance::mono(self.tcx, def_id); let instance = Instance::mono(self.tcx, def_id);
let fn_attrs = self.tcx.codegen_fn_attrs(def_id); let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
if let Some(&global) = self.instances.borrow().get(&instance) { if let Some(&global) = self.instances.borrow().get(&instance) {
/*let attrs = self.tcx.codegen_fn_attrs(def_id);
let name = &*self.tcx.symbol_name(instance).name;
let name =
if let Some(linkage) = attrs.linkage {
// This is to match what happens in check_and_apply_linkage.
Cow::from(format!("_rust_extern_with_linkage_{}", name))
}
else {
Cow::from(name)
};
let global = self.context.new_global(None, GlobalKind::Imported, global.get_type(), &name)
.get_address(None);
self.global_names.borrow_mut().insert(global, name.to_string());*/
return global; return global;
} }
@ -313,8 +236,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
let sym = self.tcx.symbol_name(instance).name; let sym = self.tcx.symbol_name(instance).name;
//debug!("get_static: sym={} instance={:?}", sym, instance);
let global = let global =
if let Some(def_id) = def_id.as_local() { if let Some(def_id) = def_id.as_local() {
let id = self.tcx.hir().local_def_id_to_hir_id(def_id); let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
@ -332,9 +253,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section); let global = self.declare_global(&sym, llty, is_tls, fn_attrs.link_section);
if !self.tcx.is_reachable_non_generic(def_id) { if !self.tcx.is_reachable_non_generic(def_id) {
/*unsafe { // TODO(antoyo): set visibility.
llvm::LLVMRustSetVisibility(global, llvm::Visibility::Hidden);
}*/
} }
global global
@ -352,8 +271,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
item => bug!("get_static: expected static, found {:?}", item), item => bug!("get_static: expected static, found {:?}", item),
}; };
//debug!("get_static: sym={} attrs={:?}", sym, attrs);
global global
} }
else { else {
@ -364,11 +281,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let span = self.tcx.def_span(def_id); let span = self.tcx.def_span(def_id);
let global = check_and_apply_linkage(&self, &attrs, ty, sym, span); let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
let needs_dll_storage_attr = false; /*self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && let needs_dll_storage_attr = false; // TODO(antoyo)
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the attrs. Instead we make them unnecessary by disallowing
// dynamic linking when linker plugin based LTO is enabled.
!self.tcx.sess.opts.cg.linker_plugin_lto.enabled();*/
            // If this assertion triggers, there's something wrong with command-line             // If this assertion triggers, there's something wrong with command-line
// argument validation. // argument validation.
@ -391,20 +304,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
// is_codegened_item query. // is_codegened_item query.
if !self.tcx.is_codegened_item(def_id) { if !self.tcx.is_codegened_item(def_id) {
unimplemented!(); unimplemented!();
/*unsafe {
llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
}*/
} }
} }
global global
}; };
/*if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { // TODO(antoyo): set dll storage class.
// For foreign (native) libs we know the exact storage type to use.
unsafe {
llvm::LLVMSetDLLStorageClass(global, llvm::DLLStorageClass::DllImport);
}
}*/
self.instances.borrow_mut().insert(instance, global); self.instances.borrow_mut().insert(instance, global);
global global
@ -474,8 +379,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL); let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
let llty = cx.layout_of(ty).gcc_type(cx, true); let llty = cx.layout_of(ty).gcc_type(cx, true);
if let Some(linkage) = attrs.linkage { if let Some(linkage) = attrs.linkage {
//debug!("get_static: sym={} linkage={:?}", sym, linkage);
// If this is a static with a linkage specified, then we need to handle // If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and // it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a // extern "C" fn() from being non-null, so we can't just declare a
@ -506,10 +409,10 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| { cx.define_global(&real_name, llty, is_tls, attrs.link_section).unwrap_or_else(|| {
cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym)) cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
}); });
//llvm::LLVMRustSetLinkage(global2, llvm::Linkage::InternalLinkage); // TODO(antoyo): set linkage.
let lvalue = global2.dereference(None); let lvalue = global2.dereference(None);
cx.global_init_block.add_assignment(None, lvalue, global1); cx.global_init_block.add_assignment(None, lvalue, global1);
        //llvm::LLVMSetInitializer(global2, global1);         // TODO(antoyo): use global_set_initializer() when it works.
global2 global2
} }
else { else {

View File

@ -41,7 +41,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
pub codegen_unit: &'tcx CodegenUnit<'tcx>, pub codegen_unit: &'tcx CodegenUnit<'tcx>,
pub context: &'gcc Context<'gcc>, pub context: &'gcc Context<'gcc>,
// TODO: First set it to a dummy block to avoid using Option? // TODO(antoyo): First set it to a dummy block to avoid using Option?
pub current_block: RefCell<Option<Block<'gcc>>>, pub current_block: RefCell<Option<Block<'gcc>>>,
pub current_func: RefCell<Option<Function<'gcc>>>, pub current_func: RefCell<Option<Function<'gcc>>>,
pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>, pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
@ -104,7 +104,7 @@ pub struct CodegenCx<'gcc, 'tcx> {
/// Cache of globals. /// Cache of globals.
pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>, pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
// TODO: remove global_names. // TODO(antoyo): remove global_names.
pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>, pub global_names: RefCell<FxHashMap<RValue<'gcc>, String>>,
/// A counter that is used for generating local symbol names /// A counter that is used for generating local symbol names
@ -119,13 +119,13 @@ pub struct CodegenCx<'gcc, 'tcx> {
    /// `const_undef()` returns structs as pointers so that they can later be assigned a value.     /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
    /// As such, this set remembers which of these pointers were returned by this function so that     /// As such, this set remembers which of these pointers were returned by this function so that
    /// they can be dereferenced later.     /// they can be dereferenced later.
/// FIXME: fix the rustc API to avoid having this hack. /// FIXME(antoyo): fix the rustc API to avoid having this hack.
pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>, pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
/// Store the pointer of different types for safety. /// Store the pointer of different types for safety.
/// When casting the values back to their original types, check that they are indeed that type /// When casting the values back to their original types, check that they are indeed that type
/// with these sets. /// with these sets.
/// FIXME: remove when the API supports more types. /// FIXME(antoyo): remove when the API supports more types.
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
lvalues: RefCell<FxHashSet<LValue<'gcc>>>, lvalues: RefCell<FxHashSet<LValue<'gcc>>>,
} }
@ -133,22 +133,20 @@ pub struct CodegenCx<'gcc, 'tcx> {
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self { pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
let check_overflow = tcx.sess.overflow_checks(); let check_overflow = tcx.sess.overflow_checks();
        // TODO: fix this mess. libgccjit seems to return a random type when using new_int_type().         // TODO(antoyo): fix this mess. libgccjit seems to return a random type when using new_int_type().
//let isize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, true);
let isize_type = context.new_c_type(CType::LongLong); let isize_type = context.new_c_type(CType::LongLong);
//let usize_type = context.new_int_type((tcx.data_layout.pointer_size.bits() / 8) as i32, false);
let usize_type = context.new_c_type(CType::ULongLong); let usize_type = context.new_c_type(CType::ULongLong);
let bool_type = context.new_type::<bool>(); let bool_type = context.new_type::<bool>();
let i8_type = context.new_type::<i8>(); let i8_type = context.new_type::<i8>();
let i16_type = context.new_type::<i16>(); let i16_type = context.new_type::<i16>();
let i32_type = context.new_type::<i32>(); let i32_type = context.new_type::<i32>();
let i64_type = context.new_c_type(CType::LongLong); let i64_type = context.new_c_type(CType::LongLong);
let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO: should this be hard-coded? let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
let u8_type = context.new_type::<u8>(); let u8_type = context.new_type::<u8>();
let u16_type = context.new_type::<u16>(); let u16_type = context.new_type::<u16>();
let u32_type = context.new_type::<u32>(); let u32_type = context.new_type::<u32>();
let u64_type = context.new_c_type(CType::ULongLong); let u64_type = context.new_c_type(CType::ULongLong);
let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO: should this be hard-coded? let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
let tls_model = to_gcc_tls_mode(tcx.sess.tls_model()); let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
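A minimal, self-contained sketch of this setup with the gccjit crate, isolating the new_c_type() workaround and the pinned 128-bit alignment (assuming CType is exported by the crate):

use gccjit::{CType, Context};

fn main() {
    let context = Context::default();
    // Fixed C types instead of new_int_type(), per the TODO above.
    let isize_type = context.new_c_type(CType::LongLong);
    // Pin the alignment of the 128-bit type explicitly.
    let i128_type = context.new_c_type(CType::Int128t).get_aligned(8);
    let bool_type = context.new_type::<bool>();
    let _ = (isize_type, i128_type, bool_type);
}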
@ -261,7 +259,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> { pub fn rvalue_as_lvalue(&self, value: RValue<'gcc>) -> LValue<'gcc> {
let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) }; let lvalue: LValue<'gcc> = unsafe { std::mem::transmute(value) };
//debug_assert!(self.lvalues.borrow().contains(&lvalue), "{:?} is not an lvalue", value);
lvalue lvalue
} }
@ -276,11 +273,11 @@ impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
type BasicBlock = Block<'gcc>; type BasicBlock = Block<'gcc>;
type Type = Type<'gcc>; type Type = Type<'gcc>;
type Funclet = (); // TODO type Funclet = (); // TODO(antoyo)
type DIScope = (); // TODO type DIScope = (); // TODO(antoyo)
type DILocation = (); // TODO type DILocation = (); // TODO(antoyo)
type DIVariable = (); // TODO type DIVariable = (); // TODO(antoyo)
} }
impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
@ -295,17 +292,12 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
} }
fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> { fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
//let symbol = self.tcx.symbol_name(instance).name;
let func = get_fn(self, instance); let func = get_fn(self, instance);
let func = self.rvalue_as_function(func); let func = self.rvalue_as_function(func);
let ptr = func.get_address(None); let ptr = func.get_address(None);
// TODO: don't do this twice: i.e. in declare_fn and here. // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
//let fn_abi = FnAbi::of_instance(self, instance, &[]); // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
//let (return_type, params, _) = fn_abi.gcc_type(self);
// FIXME: the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
//let pointer_type = ptr.get_type();
self.normal_function_addresses.borrow_mut().insert(ptr); self.normal_function_addresses.borrow_mut().insert(ptr);
@ -354,12 +346,12 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
"rust_eh_personality" "rust_eh_personality"
}; };
//let func = self.declare_func(name, self.type_i32(), &[], true); //let func = self.declare_func(name, self.type_i32(), &[], true);
                    // FIXME: this hack should not be needed. It will probably be removed when                     // FIXME(antoyo): this hack should not be needed. It will probably be removed when
// unwinding support is added. // unwinding support is added.
self.context.new_rvalue_from_int(self.int_type, 0) self.context.new_rvalue_from_int(self.int_type, 0)
} }
}; };
//attributes::apply_target_cpu_attr(self, llfn); // TODO(antoyo): apply target cpu attributes.
self.eh_personality.set(Some(llfn)); self.eh_personality.set(Some(llfn));
llfn llfn
} }
@ -378,32 +370,18 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> { fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
unimplemented!(); unimplemented!();
//&self.used_statics
} }
fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) { fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
// TODO // TODO(antoyo)
//attributes::set_frame_pointer_type(self, llfn)
} }
fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) { fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
// TODO // TODO(antoyo)
//attributes::apply_target_cpu_attr(self, llfn)
} }
fn create_used_variable(&self) { fn create_used_variable(&self) {
unimplemented!(); unimplemented!();
/*let name = const_cstr!("llvm.used");
let section = const_cstr!("llvm.metadata");
let array =
self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
unsafe {
let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
llvm::LLVMSetInitializer(g, array);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
llvm::LLVMSetSection(g, section.as_ptr());
}*/
} }
fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {

View File

@ -20,99 +20,31 @@ impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx
_function_source_hash: u64, _function_source_hash: u64,
) -> bool { ) -> bool {
unimplemented!(); unimplemented!();
/*if let Some(coverage_context) = self.coverage_context() {
debug!(
"ensuring function source hash is set for instance={:?}; function_source_hash={}",
instance, function_source_hash,
);
let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
coverage_map
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.set_function_source_hash(function_source_hash);
true
} else {
false
}*/
} }
fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool { fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
/*if let Some(coverage_context) = self.coverage_context() { // TODO(antoyo)
debug!(
"adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
at {:?}",
instance, function_source_hash, id, region,
);
let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
coverage_regions
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_counter(function_source_hash, id, region);
true
} else {
false
}*/
// TODO
false false
} }
fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool { fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
/*if let Some(coverage_context) = self.coverage_context() { // TODO(antoyo)
debug!(
"adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
at {:?}",
instance, id, lhs, op, rhs, region,
);
let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
coverage_regions
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_counter_expression(id, lhs, op, rhs, region);
true
} else {
false
}*/
// TODO
false false
} }
fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool { fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
/*if let Some(coverage_context) = self.coverage_context() { // TODO(antoyo)
debug!(
"adding unreachable code to coverage_regions: instance={:?}, at {:?}",
instance, region,
);
let mut coverage_regions = coverage_context.function_coverage_map.borrow_mut();
coverage_regions
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_unreachable_region(region);
true
} else {
false
}*/
// TODO
false false
} }
} }
impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn coverageinfo_finalize(&self) { fn coverageinfo_finalize(&self) {
// TODO // TODO(antoyo)
//mapgen::finalize(self)
} }
fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> { fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*if let Some(coverage_context) = self.coverage_context() {
debug!("getting pgo_func_name_var for instance={:?}", instance);
let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
pgo_func_name_var_map
.entry(instance)
.or_insert_with(|| create_pgo_func_name_var(self, instance))
} else {
bug!("Could not get the `coverage_context`");
}*/
} }
/// Functions with MIR-based coverage are normally codegenned _only_ if /// Functions with MIR-based coverage are normally codegenned _only_ if
@ -133,8 +65,5 @@ impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
/// added as `unreachable_region`s. /// added as `unreachable_region`s.
fn define_unused_fn(&self, _def_id: DefId) { fn define_unused_fn(&self, _def_id: DefId) {
unimplemented!(); unimplemented!();
/*let instance = declare_unused_fn(self, &def_id);
codegen_unused_fn_and_counter(self, instance);
add_unused_function_coverage(self, instance, def_id);*/
} }
} }

View File

@ -17,58 +17,10 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.). // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) { fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
unimplemented!(); unimplemented!();
/*let cx = self.cx();
// Convert the direct and indirect offsets to address ops.
// FIXME(eddyb) use `const`s instead of getting the values via FFI,
// the values should match the ones in the DWARF standard anyway.
let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
let mut addr_ops = SmallVec::<[_; 8]>::new();
if direct_offset.bytes() > 0 {
addr_ops.push(op_plus_uconst());
addr_ops.push(direct_offset.bytes() as i64);
} }
for &offset in indirect_offsets {
addr_ops.push(op_deref());
if offset.bytes() > 0 {
addr_ops.push(op_plus_uconst());
addr_ops.push(offset.bytes() as i64);
}
}
// FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
// to avoid having to pass it down in both places?
// NB: `var` doesn't seem to know about the column, so that's a limitation.
let dbg_loc = cx.create_debug_loc(scope_metadata, span);
unsafe {
// FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
DIB(cx),
variable_alloca,
dbg_var,
addr_ops.as_ptr(),
addr_ops.len() as c_uint,
dbg_loc,
self.llbb(),
);
}*/
}
/*fn set_source_location(&mut self, scope: Self::DIScope, span: Span) {
unimplemented!();
/*debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
let dbg_loc = self.cx().create_debug_loc(scope, span);
unsafe {
llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
}*/
}*/
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
// TODO: replace with gcc_jit_context_new_global_with_initializer() if it's added: // TODO(antoyo): replace with gcc_jit_context_new_global_with_initializer() if it's added:
// https://gcc.gnu.org/pipermail/jit/2020q3/001225.html // https://gcc.gnu.org/pipermail/jit/2020q3/001225.html
// //
// Call the function to initialize global values here. // Call the function to initialize global values here.
@ -76,7 +28,7 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
use std::iter; use std::iter;
for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) { for crate_num in self.cx.tcx.crates(()).iter().copied().chain(iter::once(LOCAL_CRATE)) {
// FIXME: better way to find if a crate is of proc-macro type? // FIXME(antoyo): better way to find if a crate is of proc-macro type?
if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly { if crate_num == LOCAL_CRATE || self.cx.tcx.dep_kind(crate_num) != CrateDepKind::MacrosOnly {
// NOTE: proc-macro crates are not included in the executable, so don't call their // NOTE: proc-macro crates are not included in the executable, so don't call their
// initialization routine. // initialization routine.
@ -87,50 +39,25 @@ impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
} }
} }
// TODO // TODO(antoyo): insert reference to gdb debug scripts section global.
//gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
} }
fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) { fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
unimplemented!(); unimplemented!();
// Avoid wasting time if LLVM value names aren't even enabled.
/*if self.sess().fewer_names() {
return;
}
// Only function parameters and instructions are local to a function,
// don't change the name of anything else (e.g. globals).
let param_or_inst = unsafe {
llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
};
if !param_or_inst {
return;
}
// Avoid replacing the name if it already exists.
// While we could combine the names somehow, it'd
// get noisy quick, and the usefulness is dubious.
if llvm::get_value_name(value).is_empty() {
llvm::set_value_name(value, name.as_bytes());
}*/
} }
fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) { fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
unimplemented!(); unimplemented!();
/*unsafe {
let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
}*/
} }
} }
impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) { fn create_vtable_metadata(&self, _ty: Ty<'tcx>, _vtable: Self::Value) {
//metadata::create_vtable_metadata(self, ty, vtable) // TODO(antoyo)
} }
fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> { fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
// TODO // TODO(antoyo)
None None
} }
@ -139,7 +66,7 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
} }
fn debuginfo_finalize(&self) { fn debuginfo_finalize(&self) {
//unimplemented!(); // TODO(antoyo)
} }
fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable { fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
@ -148,260 +75,9 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope { fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
unimplemented!(); unimplemented!();
/*let def_id = instance.def_id();
let containing_scope = get_containing_scope(self, instance);
let span = self.tcx.def_span(def_id);
let loc = self.lookup_debug_loc(span.lo());
let file_metadata = file_metadata(self, &loc.file);
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(self, fn_abi);
llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
};
// Find the enclosing function, in case this is a closure.
let def_key = self.tcx().def_key(def_id);
let mut name = def_key.disambiguated_data.data.to_string();
let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
// Get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
let generics = self.tcx().generics_of(enclosing_fn_def_id);
let substs = instance.substs.truncate_to(self.tcx(), generics);
let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
let linkage_name = &mangled_name_of_instance(self, instance).name;
// Omit the linkage_name if it is the same as subprogram name.
let linkage_name = if &name == linkage_name { "" } else { linkage_name };
// FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
let scope_line = loc.line;
let mut flags = DIFlags::FlagPrototyped;
if fn_abi.ret.layout.abi.is_uninhabited() {
flags |= DIFlags::FlagNoReturn;
}
let mut spflags = DISPFlags::SPFlagDefinition;
if is_node_local_to_unit(self, def_id) {
spflags |= DISPFlags::SPFlagLocalToUnit;
}
if self.sess().opts.optimize != config::OptLevel::No {
spflags |= DISPFlags::SPFlagOptimized;
}
if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
if id.to_def_id() == def_id {
spflags |= DISPFlags::SPFlagMainSubprogram;
}
}
unsafe {
return llvm::LLVMRustDIBuilderCreateFunction(
DIB(self),
containing_scope,
name.as_ptr().cast(),
name.len(),
linkage_name.as_ptr().cast(),
linkage_name.len(),
file_metadata,
loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
function_type_metadata,
scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
flags,
spflags,
maybe_definition_llfn,
template_parameters,
None,
);
}
fn get_function_signature<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) -> &'ll DIArray {
if cx.sess().opts.debuginfo == DebugInfo::Limited {
return create_DIArray(DIB(cx), &[]);
}
let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
// Return type -- llvm::DIBuilder wants this at index 0
signature.push(if fn_abi.ret.is_ignore() {
None
} else {
Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP))
});
// Arguments types
if cx.sess().target.options.is_like_msvc {
// FIXME(#42800):
// There is a bug in MSDIA that leads to a crash when it encounters
// a fixed-size array of `u8` or something zero-sized in a
// function-type (see #40477).
// As a workaround, we replace those fixed-size arrays with a
// pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
// appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
// and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
// This transformed type is wrong, but these function types are
// already inaccurate due to ABI adjustments (see #42800).
signature.extend(fn_abi.args.iter().map(|arg| {
let t = arg.layout.ty;
let t = match t.kind() {
ty::Array(ct, _)
if (*ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
{
cx.tcx.mk_imm_ptr(ct)
}
_ => t,
};
Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
}));
} else {
signature.extend(
fn_abi
.args
.iter()
.map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
);
}
create_DIArray(DIB(cx), &signature[..])
}
fn get_template_parameters<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
generics: &ty::Generics,
substs: SubstsRef<'tcx>,
name_to_append_suffix_to: &mut String,
) -> &'ll DIArray {
if substs.types().next().is_none() {
return create_DIArray(DIB(cx), &[]);
}
name_to_append_suffix_to.push('<');
for (i, actual_type) in substs.types().enumerate() {
if i != 0 {
name_to_append_suffix_to.push(',');
}
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
// Add actual type name to <...> clause of function name
let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
name_to_append_suffix_to.push_str(&actual_type_name[..]);
}
name_to_append_suffix_to.push('>');
// Again, only create type information if full debuginfo is enabled
let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
let names = get_parameter_names(cx, generics);
substs
.iter()
.zip(names)
.filter_map(|(kind, name)| {
if let GenericArgKind::Type(ty) = kind.unpack() {
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
let actual_type_metadata =
type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
let name = name.as_str();
Some(unsafe {
Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
DIB(cx),
None,
name.as_ptr().cast(),
name.len(),
actual_type_metadata,
))
})
} else {
None
}
})
.collect()
} else {
vec![]
};
create_DIArray(DIB(cx), &template_params[..])
}
fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
let mut names = generics
.parent
.map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
names.extend(generics.params.iter().map(|param| param.name));
names
}
fn get_containing_scope<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll DIScope {
// First, let's see if this is a method within an inherent impl. Because
// if yes, we want to make the result subroutine DIE a child of the
// subroutine's self-type.
let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
// If the method does *not* belong to a trait, proceed
if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
instance.substs,
ty::ParamEnv::reveal_all(),
&cx.tcx.type_of(impl_def_id),
);
// Only "class" methods are generally understood by LLVM,
// so avoid methods on other types (e.g., `<*mut T>::null`).
match impl_self_ty.kind() {
ty::Adt(def, ..) if !def.is_box() => {
// Again, only create type information if full debuginfo is enabled
if cx.sess().opts.debuginfo == DebugInfo::Full
&& !impl_self_ty.needs_subst()
{
Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
} else {
Some(namespace::item_namespace(cx, def.did))
}
}
_ => None,
}
} else {
// For trait method impls we still use the "parallel namespace"
// strategy
None
}
});
self_type.unwrap_or_else(|| {
namespace::item_namespace(
cx,
DefId {
krate: instance.def_id().krate,
index: cx
.tcx
.def_key(instance.def_id())
.parent
.expect("get_containing_scope: missing parent?"),
},
)
})
}*/
} }
fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation { fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
unimplemented!(); unimplemented!();
/*let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
unsafe {
llvm::LLVMRustDIBuilderCreateDebugLocation(
utils::debug_context(self).llcontext,
line.unwrap_or(UNKNOWN_LINE_NUMBER),
col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
scope,
inlined_at,
)
}*/
} }
} }

View File

@ -35,7 +35,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> { pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> RValue<'gcc> {
//debug!("declare_global_with_linkage(name={:?})", name);
let global = self.context.new_global(None, linkage, ty, name) let global = self.context.new_global(None, linkage, ty, name)
.get_address(None); .get_address(None);
self.globals.borrow_mut().insert(name.to_string(), global); self.globals.borrow_mut().insert(name.to_string(), global);
@ -48,13 +47,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> { pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
self.linkage.set(FunctionType::Exported); self.linkage.set(FunctionType::Exported);
let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic); let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
// FIXME: this is a wrong cast. That requires changing the compiler API. // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) } unsafe { std::mem::transmute(func) }
} }
pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> { pub fn declare_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> RValue<'gcc> {
//debug!("declare_global(name={:?})", name); // FIXME(antoyo): correctly support global variable initialization.
// FIXME: correctly support global variable initialization.
if name.starts_with(ARGV_INIT_ARRAY) { if name.starts_with(ARGV_INIT_ARRAY) {
// NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the // NOTE: hack to avoid having to update the names in mangled_std_symbols: we save the
// name of the variable now to actually declare it later. // name of the variable now to actually declare it later.
@ -82,7 +80,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> { pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
// TODO: use the fn_type parameter. // TODO(antoyo): use the fn_type parameter.
let const_string = self.context.new_type::<u8>().make_pointer().make_pointer(); let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
let return_type = self.type_i32(); let return_type = self.type_i32();
let variadic = false; let variadic = false;
@ -91,7 +89,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
        // NOTE: the current_func must be set here as well, because get_fn() is not called         // NOTE: the current_func must be set here as well, because get_fn() is not called
// for the main function. // for the main function.
*self.current_func.borrow_mut() = Some(func); *self.current_func.borrow_mut() = Some(func);
// FIXME: this is a wrong cast. That requires changing the compiler API. // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) } unsafe { std::mem::transmute(func) }
} }
@ -128,11 +126,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.global_names.borrow_mut().insert(global, global_name.to_string()); self.global_names.borrow_mut().insert(global, global_name.to_string());
self.argv_initialized.set(true); self.argv_initialized.set(true);
} }
//debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
let (return_type, params, variadic) = fn_abi.gcc_type(self); let (return_type, params, variadic) = fn_abi.gcc_type(self);
let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic); let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
//fn_abi.apply_attrs_llfn(self, func); // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
// FIXME: this is a wrong cast. That requires changing the compiler API.
unsafe { std::mem::transmute(func) } unsafe { std::mem::transmute(func) }
} }
@ -146,19 +142,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> { pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
//debug!("get_declared_value(name={:?})", name); // TODO(antoyo): use a different field than globals, because this seems to return a function?
// TODO: use a different field than globals, because this seems to return a function?
self.globals.borrow().get(name).cloned() self.globals.borrow().get(name).cloned()
} }
/*fn get_defined_value(&self, name: &str) -> Option<RValue<'gcc>> {
// TODO: gcc does not allow global initialization.
None
/*self.get_declared_value(name).and_then(|val| {
let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
if !declaration { Some(val) } else { None }
})*/
}*/
} }
/// Declare a function. /// Declare a function.
@ -166,11 +152,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
/// If there's a value with the same name already declared, the function will /// If there's a value with the same name already declared, the function will
/// update the declaration and return the existing Value instead. /// update the declaration and return the existing Value instead.
fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> { fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
//debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
/*let llfn = unsafe {
llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
};*/
if name.starts_with("llvm.") { if name.starts_with("llvm.") {
return llvm::intrinsic(name, cx); return llvm::intrinsic(name, cx);
} }
@ -180,32 +161,24 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll
} }
else { else {
let params: Vec<_> = param_types.into_iter().enumerate() let params: Vec<_> = param_types.into_iter().enumerate()
.map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO: set name. .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
.collect(); .collect();
let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic); let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
cx.functions.borrow_mut().insert(name.to_string(), func); cx.functions.borrow_mut().insert(name.to_string(), func);
func func
}; };
//llvm::SetFunctionCallConv(llfn, callconv); // TODO // TODO(antoyo): set function calling convention.
// Function addresses in Rust are never significant, allowing functions to // TODO(antoyo): set unnamed address.
// be merged. // TODO(antoyo): set no red zone function attribute.
//llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global); // TODO // TODO(antoyo): set attributes for optimisation.
    // TODO(antoyo): set attributes for non-lazy bind.
/*if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) { // FIXME(antoyo): invalid cast.
llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
}*/
//attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
//attributes::non_lazy_bind(cx.sess(), llfn);
// FIXME: invalid cast.
// TODO: is this line useful?
//cx.globals.borrow_mut().insert(name.to_string(), unsafe { std::mem::transmute(func) });
func func
} }
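For reference, a self-contained sketch of the same new_parameter()/new_function() flow through the gccjit crate, completed with a trivial body (the function name double_it is illustrative):

use gccjit::{Context, FunctionType};

fn main() {
    let context = Context::default();
    let int_type = context.new_type::<i32>();
    let param = context.new_parameter(None, int_type, "param0");
    let func = context.new_function(None, FunctionType::Exported, int_type, &[param], "double_it", false);

    // Give the declaration a body: return param0 + param0.
    let block = func.new_block("entry");
    let arg = func.get_param(0).to_rvalue();
    block.end_with_return(None, arg + arg);
}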
// FIXME: this is a hack because libgccjit currently only supports alpha, num and _. // FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _.
// Unsupported characters: `$` and `.`. // Unsupported characters: `$` and `.`.
pub fn mangle_name(name: &str) -> String { pub fn mangle_name(name: &str) -> String {
name.replace(|char: char| { name.replace(|char: char| {
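The hunk is cut off here; the following plain-Rust sketch shows the kind of mapping this implies. The replacement character is illustrative, not necessarily the crate's actual choice:

fn mangle_name(name: &str) -> String {
    // Replace everything libgccjit rejects (notably `$` and `.`) with `_`.
    name.replace(|c: char| !c.is_alphanumeric() && c != '_', "_")
}

fn main() {
    assert_eq!(mangle_name("core..fmt..Debug$u20$impl"), "core__fmt__Debug_u20_impl");
}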

View File

@ -11,16 +11,12 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
cx.functions.borrow_mut().insert(gcc_name.to_string(), func); cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
return func; return func;
}, },
// TODO: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
"llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd", "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
"llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd", "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
"llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128", "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
_ => unimplemented!("unsupported LLVM intrinsic {}", name) _ => unimplemented!("unsupported LLVM intrinsic {}", name)
}; };
println!("Get target builtin");
unimplemented!(); unimplemented!();
/*let func = cx.context.get_target_builtin_function(gcc_name);
cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
func*/
} }
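A standalone sketch of the name translation this match performs; the table copies the three mappings shown above and is deliberately not exhaustive:

fn llvm_to_gcc_builtin(name: &str) -> Option<&'static str> {
    match name {
        "llvm.x86.sse2.cmp.pd" => Some("__builtin_ia32_cmppd"),
        "llvm.x86.sse2.movmsk.pd" => Some("__builtin_ia32_movmskpd"),
        "llvm.x86.sse2.pmovmskb.128" => Some("__builtin_ia32_pmovmskb128"),
        _ => None, // the real code falls back to unimplemented!()
    }
}

fn main() {
    assert_eq!(llvm_to_gcc_builtin("llvm.x86.sse2.cmp.pd"), Some("__builtin_ia32_cmppd"));
    assert!(llvm_to_gcc_builtin("llvm.x86.avx.anything").is_none());
}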

View File

@ -96,7 +96,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
let llval = let llval =
match name { match name {
_ if simple.is_some() => { _ if simple.is_some() => {
// FIXME: remove this cast when the API supports function. // FIXME(antoyo): remove this cast when the API supports function.
let func = unsafe { std::mem::transmute(simple.expect("simple")) }; let func = unsafe { std::mem::transmute(simple.expect("simple")) };
self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None) self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
}, },
@ -118,40 +118,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
sym::breakpoint => { sym::breakpoint => {
unimplemented!(); unimplemented!();
/*let llfn = self.get_intrinsic(&("llvm.debugtrap"));
self.call(llfn, &[], None)*/
} }
sym::va_copy => { sym::va_copy => {
unimplemented!(); unimplemented!();
/*let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)*/
} }
sym::va_arg => { sym::va_arg => {
unimplemented!(); unimplemented!();
/*match fn_abi.ret.layout.abi {
abi::Abi::Scalar(ref scalar) => {
match scalar.value {
Primitive::Int(..) => {
if self.cx().size_of(ret_ty).bytes() < 4 {
// `va_arg` should not be called on a integer type
// less than 4 bytes in length. If it is, promote
// the integer to a `i32` and truncate the result
// back to the smaller type.
let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
self.trunc(promoted_result, llret_ty)
} else {
emit_va_arg(self, args[0], ret_ty)
}
}
Primitive::F64 | Primitive::Pointer => {
emit_va_arg(self, args[0], ret_ty)
}
// `va_arg` should never be used with the return type f32.
Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
}
}
_ => bug!("the va_arg intrinsic does not work with non-scalar types"),
}*/
} }
sym::volatile_load | sym::unaligned_volatile_load => { sym::volatile_load | sym::unaligned_volatile_load => {
@ -161,15 +133,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self))); ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
} }
let load = self.volatile_load(ptr.get_type(), ptr); let load = self.volatile_load(ptr.get_type(), ptr);
// TODO // TODO(antoyo): set alignment.
/*let align = if name == sym::unaligned_volatile_load {
1
} else {
self.align_of(tp_ty).bytes() as u32
};
unsafe {
llvm::LLVMSetAlignment(load, align);
}*/
self.to_immediate(load, self.layout_of(tp_ty)) self.to_immediate(load, self.layout_of(tp_ty))
} }
sym::volatile_store => { sym::volatile_store => {
@ -187,24 +151,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
| sym::prefetch_read_instruction | sym::prefetch_read_instruction
| sym::prefetch_write_instruction => { | sym::prefetch_write_instruction => {
unimplemented!(); unimplemented!();
/*let expect = self.get_intrinsic(&("llvm.prefetch"));
let (rw, cache_type) = match name {
sym::prefetch_read_data => (0, 1),
sym::prefetch_write_data => (1, 1),
sym::prefetch_read_instruction => (0, 0),
sym::prefetch_write_instruction => (1, 0),
_ => bug!(),
};
self.call(
expect,
&[
args[0].immediate(),
self.const_i32(rw),
args[1].immediate(),
self.const_i32(cache_type),
],
None,
)*/
} }
sym::ctlz sym::ctlz
| sym::ctlz_nonzero | sym::ctlz_nonzero
@ -257,10 +203,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
self.block = Some(after_block); self.block = Some(after_block);
result.to_rvalue() result.to_rvalue()
/*let y = self.const_bool(false);
let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
self.call(llfn, &[args[0].immediate(), y], None)*/
} }
sym::ctlz_nonzero => { sym::ctlz_nonzero => {
self.count_leading_zeroes(width, args[0].immediate()) self.count_leading_zeroes(width, args[0].immediate())
@ -274,11 +216,11 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
args[0].immediate() // byte swap a u8/i8 is just a no-op args[0].immediate() // byte swap a u8/i8 is just a no-op
} }
else { else {
// TODO: check if it's faster to use string literals and a // TODO(antoyo): check if it's faster to use string literals and a
// match instead of format!. // match instead of format!.
let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width)); let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
let mut arg = args[0].immediate(); let mut arg = args[0].immediate();
// FIXME: this cast should not be necessary. Remove // FIXME(antoyo): this cast should not be necessary. Remove
// when having proper sized integer types. // when having proper sized integer types.
let param_type = bswap.get_param(0).to_rvalue().get_type(); let param_type = bswap.get_param(0).to_rvalue().get_type();
if param_type != arg.get_type() { if param_type != arg.get_type() {
@ -289,7 +231,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
}, },
sym::bitreverse => self.bit_reverse(width, args[0].immediate()), sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
sym::rotate_left | sym::rotate_right => { sym::rotate_left | sym::rotate_right => {
// TODO: implement using algorithm from: // TODO(antoyo): implement using algorithm from:
// https://blog.regehr.org/archives/1063 // https://blog.regehr.org/archives/1063
// for other platforms. // for other platforms.
let is_left = name == sym::rotate_left; let is_left = name == sym::rotate_left;
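The cited post describes a branchless rotate that stays well-defined for a shift count of zero. A plain-Rust sketch for a 32-bit value (the codegen would emit the same masks and shifts as gccjit operations):

fn rotate_left_u32(x: u32, n: u32) -> u32 {
    // Masking both shift amounts keeps them in 0..=31, so a count of 0
    // (or >= 32) never produces an out-of-range shift.
    let mask = 31;
    (x << (n & mask)) | (x >> (n.wrapping_neg() & mask))
}

fn main() {
    assert_eq!(rotate_left_u32(0x8000_0001, 1), 0x0000_0003);
    assert_eq!(rotate_left_u32(0xDEAD_BEEF, 0), 0xDEAD_BEEF);
    assert_eq!(rotate_left_u32(1, 36), 1u32.rotate_left(36));
}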
@ -346,7 +288,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
self.const_bool(true) self.const_bool(true)
} }
/*else if use_integer_compare { /*else if use_integer_compare {
let integer_ty = self.type_ix(layout.size.bits()); // FIXME: LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits. let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
let ptr_ty = self.type_ptr_to(integer_ty); let ptr_ty = self.type_ptr_to(integer_ty);
let a_ptr = self.bitcast(a, ptr_ty); let a_ptr = self.bitcast(a, ptr_ty);
let a_val = self.load(integer_ty, a_ptr, layout.align.abi); let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
@ -396,38 +338,27 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
} }
fn assume(&mut self, value: Self::Value) { fn assume(&mut self, value: Self::Value) {
        // TODO: switch to assume when it exists.         // TODO(antoyo): switch to assume when it exists.
// Or use something like this: // Or use something like this:
// #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
self.expect(value, true); self.expect(value, true);
} }
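The #define in the comment corresponds to this pattern; here it is rendered as plain Rust with the standard library's unreachable hint (illustrative only, not what the backend emits):

/// Tell the optimizer that `cond` holds by making the false branch unreachable.
/// Calling this with a false condition is undefined behavior.
unsafe fn assume(cond: bool) {
    if !cond {
        std::hint::unreachable_unchecked();
    }
}

fn main() {
    let x = 5;
    unsafe { assume(x < 10) };
    println!("{}", x);
}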
fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value { fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
// TODO // TODO(antoyo)
/*let expect = self.context.get_builtin_function("__builtin_expect");
let expect: RValue<'gcc> = unsafe { std::mem::transmute(expect) };
self.call(expect, &[cond, self.const_bool(expected)], None)*/
cond cond
} }
fn sideeffect(&mut self) { fn sideeffect(&mut self) {
// TODO // TODO(antoyo)
/*if self.tcx().sess.opts.debugging_opts.insert_sideeffect {
let fnname = self.get_intrinsic(&("llvm.sideeffect"));
self.call(fnname, &[], None);
}*/
} }
fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> { fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*let intrinsic = self.cx().get_intrinsic("llvm.va_start");
self.call(intrinsic, &[va_list], None)*/
} }
fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> { fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
unimplemented!(); unimplemented!();
/*let intrinsic = self.cx().get_intrinsic("llvm.va_end");
self.call(intrinsic, &[va_list], None)*/
} }
} }
@ -634,7 +565,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
step4 step4
}, },
32 => { 32 => {
// TODO: Refactor with other implementations. // TODO(antoyo): Refactor with other implementations.
// First step. // First step.
let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555)); let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
let left = self.shl(left, context.new_rvalue_from_long(typ, 1)); let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
@ -681,7 +612,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
// Second step. // Second step.
let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF)); let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
let left = self.shl(left, context.new_rvalue_from_long(typ, 15)); let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO: transmute the number instead? let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
let right = self.lshr(right, context.new_rvalue_from_long(typ, 17)); let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
let step2 = self.or(left, right); let step2 = self.or(left, right);
@ -715,7 +646,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
step5 step5
}, },
128 => { 128 => {
// TODO: find a more efficient implementation? // TODO(antoyo): find a more efficient implementation?
let sixty_four = self.context.new_rvalue_from_long(typ, 64); let sixty_four = self.context.new_rvalue_from_long(typ, 64);
let high = self.context.new_cast(None, value >> sixty_four, self.u64_type); let high = self.context.new_cast(None, value >> sixty_four, self.u64_type);
let low = self.context.new_cast(None, value, self.u64_type); let low = self.context.new_cast(None, value, self.u64_type);
@ -735,7 +666,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
} }
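The 128-bit arm above splits the value into two 64-bit halves; a plain-Rust mirror of that strategy, checked against the standard library:

fn bit_reverse_u128(value: u128) -> u128 {
    let high = (value >> 64) as u64;
    let low = value as u64;
    // Reverse each half, then swap the halves.
    ((low.reverse_bits() as u128) << 64) | (high.reverse_bits() as u128)
}

fn main() {
    let value = 0x0123_4567_89AB_CDEF_u128;
    assert_eq!(bit_reverse_u128(value), value.reverse_bits());
}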
fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> { fn count_leading_zeroes(&self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
// TODO: use width? // TODO(antoyo): use width?
let arg_type = arg.get_type(); let arg_type = arg.get_type();
let count_leading_zeroes = let count_leading_zeroes =
if arg_type.is_uint(&self.cx) { if arg_type.is_uint(&self.cx) {
@ -873,11 +804,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
} }
fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> { fn pop_count(&self, value: RValue<'gcc>) -> RValue<'gcc> {
// TODO: use the optimized version with fewer operations. // TODO(antoyo): use the optimized version with fewer operations.
let value_type = value.get_type(); let value_type = value.get_type();
if value_type.is_u128(&self.cx) { if value_type.is_u128(&self.cx) {
            // TODO: handle this case in the normal algorithm below for a more efficient             // TODO(antoyo): handle this case in the normal algorithm below for a more efficient
            // implementation (one that does not require a call to __popcountdi2).             // implementation (one that does not require a call to __popcountdi2).
let popcount = self.context.get_builtin_function("__builtin_popcountll"); let popcount = self.context.get_builtin_function("__builtin_popcountll");
let sixty_four = self.context.new_rvalue_from_long(value_type, 64); let sixty_four = self.context.new_rvalue_from_long(value_type, 64);
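The hunk is truncated here; a plain-Rust mirror of the intended u128 strategy, which adds the popcounts of the two 64-bit halves (exactly what the generated __builtin_popcountll calls compute):

fn pop_count_u128(value: u128) -> u32 {
    let high = (value >> 64) as u64;
    let low = value as u64;
    high.count_ones() + low.count_ones()
}

fn main() {
    let value = (0xFFu128 << 100) | 0b1011;
    assert_eq!(pop_count_u128(value), value.count_ones());
}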
@ -1083,204 +1014,8 @@ fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<
} }
else if wants_msvc_seh(bx.sess()) { else if wants_msvc_seh(bx.sess()) {
unimplemented!(); unimplemented!();
//codegen_msvc_try(bx, try_func, data, catch_func, dest);
} }
else { else {
unimplemented!(); unimplemented!();
//codegen_gnu_try(bx, try_func, data, catch_func, dest);
} }
} }
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// which have support in LLVM for SEH on MSVC targets. Although these
// instructions are meant to work for all targets, as of this writing LLVM
// does not recommend using them, as the old ones are still better
// optimized.
/*fn codegen_msvc_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
unimplemented!();
/*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
bx.set_personality_fn(bx.eh_personality());
bx.sideeffect();
let mut normal = bx.build_sibling_block("normal");
let mut catchswitch = bx.build_sibling_block("catchswitch");
let mut catchpad = bx.build_sibling_block("catchpad");
let mut caught = bx.build_sibling_block("caught");
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
// We're generating an IR snippet that looks like:
//
// declare i32 @rust_try(%try_func, %data, %catch_func) {
// %slot = alloca u8*
// invoke %try_func(%data) to label %normal unwind label %catchswitch
//
// normal:
// ret i32 0
//
// catchswitch:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// catchpad:
// %tok = catchpad within %cs [%type_descriptor, 0, %slot]
// %ptr = load %slot
// call %catch_func(%data, %ptr)
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
// This structure follows the basic usage of throw/try/catch in LLVM.
// For example, compile this C++ snippet to see what LLVM generates:
//
// #include <stdint.h>
//
// struct rust_panic {
// rust_panic(const rust_panic&);
// ~rust_panic();
//
// uint64_t x[2];
// };
//
// int __rust_try(
// void (*try_func)(void*),
// void *data,
// void (*catch_func)(void*, void*) noexcept
// ) {
// try {
// try_func(data);
// return 0;
// } catch(rust_panic& a) {
// catch_func(data, &a);
// return 1;
// }
// }
//
// More information can be found in libstd's seh.rs implementation.
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let slot = bx.alloca(bx.type_i8p(), ptr_align);
bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(bx.const_i32(0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
// We can't use the TypeDescriptor defined in libpanic_unwind because it
// might be in another DLL and the SEH encoding only supports specifying
// a TypeDescriptor from the current module.
//
// However this isn't an issue since the MSVC runtime uses string
// comparison on the type name to match TypeDescriptors rather than
// pointer equality.
//
// So instead we generate a new TypeDescriptor in each module that uses
// `try` and let the linker merge duplicate definitions in the same
// module.
//
// When modifying, make sure that the type_name string exactly matches
// the one used in src/libpanic_unwind/seh.rs.
let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
let type_name = bx.const_bytes(b"rust_panic\0");
let type_info =
bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
unsafe {
llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
llvm::SetUniqueComdat(bx.llmod, tydesc);
llvm::LLVMSetInitializer(tydesc, type_info);
}
// The flag value of 8 indicates that we are catching the exception by
// reference instead of by value. We can't use catch by value because
// that requires copying the exception object, which we don't support
// since our exception object effectively contains a Box.
//
// Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
let flags = bx.const_i32(8);
let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
let ptr = catchpad.load(slot, ptr_align);
catchpad.call(catch_func, &[data, ptr], Some(&funclet));
catchpad.catch_ret(&funclet, caught.llbb());
caught.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);*/
}*/
// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
/*fn codegen_gnu_try<'a, 'gcc, 'tcx>(_bx: &mut Builder<'a, 'gcc, 'tcx>, _try_func: RValue<'gcc>, _data: RValue<'gcc>, _catch_func: RValue<'gcc>, _dest: RValue<'gcc>) {
unimplemented!();
/*let llfn = get_rust_try_fn(bx, &mut |mut bx| {
// Codegens the shims described above:
//
// bx:
// invoke %try_func(%data) normal %normal unwind %catch
//
// normal:
// ret 0
//
// catch:
// (%ptr, _) = landingpad
// call %catch_func(%data, %ptr)
// ret 1
bx.sideeffect();
let mut then = bx.build_sibling_block("then");
let mut catch = bx.build_sibling_block("catch");
let try_func = llvm::get_param(bx.llfn(), 0);
let data = llvm::get_param(bx.llfn(), 1);
let catch_func = llvm::get_param(bx.llfn(), 2);
bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
then.ret(bx.const_i32(0));
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
Some(tydesc) => {
let tydesc = bx.get_static(tydesc);
bx.bitcast(tydesc, bx.type_i8p())
}
None => bx.const_null(bx.type_i8p()),
};
catch.add_clause(vals, tydesc);
let ptr = catch.extract_value(vals, 0);
catch.call(catch_func, &[data, ptr], None);
catch.ret(bx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = bx.call(llfn, &[try_func, data, catch_func], None);
let i32_align = bx.tcx().data_layout.i32_align.abi;
bx.store(ret, dest, i32_align);*/
}*/
View File
@ -12,8 +12,6 @@ use rustc_span::{Span, Symbol, sym};
use crate::builder::Builder; use crate::builder::Builder;
pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> { pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
//println!("Generic simd: {}", name);
// macros for error handling: // macros for error handling:
macro_rules! emit_error { macro_rules! emit_error {
($msg: tt) => { ($msg: tt) => {
@ -56,33 +54,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
let arg_tys = sig.inputs(); let arg_tys = sig.inputs();
let name_str = &*name.as_str(); let name_str = &*name.as_str();
/*if name == sym::simd_select_bitmask {
let in_ty = arg_tys[0];
let m_len = match in_ty.kind() {
// Note that this `.unwrap()` crashes for isize/usize, that's sort
// of intentional as there's not currently a use case for that.
ty::Int(i) => i.bit_width().unwrap(),
ty::Uint(i) => i.bit_width().unwrap(),
_ => return_error!("`{}` is not an integral type", in_ty),
};
require_simd!(arg_tys[1], "argument");
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
// Allow masks for vectors with fewer than 8 elements to be
// represented with a u8 or i8.
m_len == v_len || (m_len == 8 && v_len < 8),
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
let i1 = bx.type_i1();
let im = bx.type_ix(v_len);
let i1xn = bx.type_vector(i1, v_len);
let m_im = bx.trunc(args[0].immediate(), im);
let m_i1s = bx.bitcast(m_im, i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}*/
// every intrinsic below takes a SIMD vector as its first argument // every intrinsic below takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input"); require_simd!(arg_tys[0], "input");
let in_ty = arg_tys[0]; let in_ty = arg_tys[0];
@ -153,37 +124,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
out_ty out_ty
); );
//let total_len = u128::from(in_len) * 2;
let vector = args[2].immediate(); let vector = args[2].immediate();
// TODO:
/*let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = bx.const_get_vector_element(vector, i as u64);
match bx.const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
}
Some(idx) if idx >= total_len => {
emit_error!(
"shuffle index #{} is out of bounds (limit {})",
arg_idx,
total_len
);
None
}
Some(idx) => Some(bx.const_i32(idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
None => return Ok(bx.const_null(llret_ty)),
};*/
return Ok(bx.shuffle_vector( return Ok(bx.shuffle_vector(
args[0].immediate(), args[0].immediate(),
args[1].immediate(), args[1].immediate(),
@ -191,723 +133,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>,
)); ));
} }
/*if name == sym::simd_insert {
require!(
in_elem == arg_tys[2],
"expected inserted type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
arg_tys[2]
);
return Ok(bx.insert_element(
args[0].immediate(),
args[2].immediate(),
args[1].immediate(),
));
}
if name == sym::simd_extract {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
}
if name == sym::simd_select {
let m_elem_ty = in_elem;
let m_len = in_len;
require_simd!(arg_tys[1], "argument");
let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
require!(
m_len == v_len,
"mismatched lengths: mask length `{}` != other vector length `{}`",
m_len,
v_len
);
match m_elem_ty.kind() {
ty::Int(_) => {}
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
}
// truncate the mask to a vector of i1s
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
if name == sym::simd_bitmask {
// The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
// vector mask and returns an unsigned integer containing the most
// significant bit (MSB) of each lane.
// If the vector has fewer than 8 lanes, a u8 is returned with zeroed
// trailing bits.
let expected_int_bits = in_len.max(8);
match ret_ty.kind() {
ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
_ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
}
// Integer vector <i{in_bitwidth} x in_len>:
let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
ty::Int(i) => (
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
ty::Uint(i) => (
args[0].immediate(),
i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
),
_ => return_error!(
"vector argument `{}`'s element type `{}`, expected integer element type",
in_ty,
in_elem
),
};
// Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
let shift_indices =
vec![
bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
in_len as _
];
let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
// Truncate vector to an <i1 x N>
let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
// Bitcast <i1 x N> to iN:
let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
// Zero-extend iN to the bitmask type:
return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
}
fn simd_simple_float_intrinsic<'a, 'gcc, 'tcx>(
name: Symbol,
in_elem: &::rustc_middle::ty::TyS<'_>,
in_ty: &::rustc_middle::ty::TyS<'_>,
in_len: u64,
bx: &mut Builder<'a, 'gcc, 'tcx>,
span: Span,
args: &[OperandRef<'tcx, RValue<'gcc>>],
) -> Result<RValue<'gcc>, ()> {
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
}
macro_rules! return_error {
($($fmt: tt)*) => {
{
emit_error!($($fmt)*);
return Err(());
}
}
}
let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
let elem_ty = bx.cx.type_float_from_ty(*f);
match f.bit_width() {
32 => ("f32", elem_ty),
64 => ("f64", elem_ty),
_ => {
return_error!(
"unsupported element type `{}` of floating-point vector `{}`",
f.name_str(),
in_ty
);
}
}
} else {
return_error!("`{}` is not a floating-point type", in_ty);
};
let vec_ty = bx.type_vector(elem_ty, in_len);
let (intr_name, fn_ty) = match name {
sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
_ => return_error!("unrecognized intrinsic `{}`", name),
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
let f = bx.declare_cfn(&llvm_name, fn_ty);
let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
Ok(c)
}
if std::matches!(
name,
sym::simd_ceil
| sym::simd_fabs
| sym::simd_fcos
| sym::simd_fexp2
| sym::simd_fexp
| sym::simd_flog10
| sym::simd_flog2
| sym::simd_flog
| sym::simd_floor
| sym::simd_fma
| sym::simd_fpow
| sym::simd_fpowi
| sym::simd_fsin
| sym::simd_fsqrt
| sym::simd_round
| sym::simd_trunc
) {
return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
}
// FIXME: use:
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
// https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
let p0s: String = "p0".repeat(no_pointers);
match *elem_ty.kind() {
ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
_ => unreachable!(),
}
}
fn gcc_vector_ty<'gcc>(
cx: &CodegenCx<'gcc, '_>,
elem_ty: Ty<'_>,
vec_len: u64,
mut no_pointers: usize,
) -> Type<'gcc> {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let mut elem_ty = match *elem_ty.kind() {
ty::Int(v) => cx.type_int_from_ty(v),
ty::Uint(v) => cx.type_uint_from_ty(v),
ty::Float(v) => cx.type_float_from_ty(v),
_ => unreachable!(),
};
while no_pointers > 0 {
elem_ty = cx.type_ptr_to(elem_ty);
no_pointers -= 1;
}
cx.type_vector(elem_ty, vec_len)
}
if name == sym::simd_gather {
// simd_gather(values: <N x T>, pointers: <N x *_ T>,
// mask: <N x i{M}>) -> <N x T>
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
require_simd!(in_ty, "first");
require_simd!(arg_tys[1], "second");
require_simd!(arg_tys[2], "third");
require_simd!(ret_ty, "return");
// Of the same length:
let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"second",
in_len,
in_ty,
arg_tys[1],
out_len
);
require!(
in_len == out_len2,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"third",
in_len,
in_ty,
arg_tys[2],
out_len2
);
// The return type must match the first argument type
require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind() {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind() {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match element_ty1.kind() {
ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
_ => {
require!(
false,
"expected element type `{}` of second argument `{}` \
to be a pointer to the element type `{}` of the first \
argument `{}`, found `{}` != `*_ {}`",
element_ty1,
arg_tys[1],
in_elem,
in_ty,
element_ty1,
in_elem
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(element_ty0));
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
false,
"expected element type `{}` of third argument `{}` \
to be a signed integer type",
element_ty2,
arg_tys[2]
);
}
}
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
// Type of the vector of pointers:
let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic =
format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(
&[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
llvm_elem_vec_ty,
),
);
let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
return Ok(v);
}
if name == sym::simd_scatter {
// simd_scatter(values: <N x T>, pointers: <N x *mut T>,
// mask: <N x i{M}>) -> ()
// * N: number of elements in the input vectors
// * T: type of the element to load
// * M: any integer width is supported, will be truncated to i1
// All types must be simd vector types
require_simd!(in_ty, "first");
require_simd!(arg_tys[1], "second");
require_simd!(arg_tys[2], "third");
// Of the same length:
let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
require!(
in_len == element_len1,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"second",
in_len,
in_ty,
arg_tys[1],
element_len1
);
require!(
in_len == element_len2,
"expected {} argument with length {} (same as input type `{}`), \
found `{}` with length {}",
"third",
in_len,
in_ty,
arg_tys[2],
element_len2
);
// This counts how many pointers
fn ptr_count(t: Ty<'_>) -> usize {
match t.kind() {
ty::RawPtr(p) => 1 + ptr_count(p.ty),
_ => 0,
}
}
// Non-ptr type
fn non_ptr(t: Ty<'_>) -> Ty<'_> {
match t.kind() {
ty::RawPtr(p) => non_ptr(p.ty),
_ => t,
}
}
// The second argument must be a simd vector with an element type that's a pointer
// to the element type of the first argument
let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
let (pointer_count, underlying_ty) = match element_ty1.kind() {
ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
(ptr_count(element_ty1), non_ptr(element_ty1))
}
_ => {
require!(
false,
"expected element type `{}` of second argument `{}` \
to be a pointer to the element type `{}` of the first \
argument `{}`, found `{}` != `*mut {}`",
element_ty1,
arg_tys[1],
in_elem,
in_ty,
element_ty1,
in_elem
);
unreachable!();
}
};
assert!(pointer_count > 0);
assert_eq!(pointer_count - 1, ptr_count(element_ty0));
assert_eq!(underlying_ty, non_ptr(element_ty0));
// The element type of the third argument must be a signed integer type of any width:
match element_ty2.kind() {
ty::Int(_) => (),
_ => {
require!(
false,
"expected element type `{}` of third argument `{}` \
to be a signed integer type",
element_ty2,
arg_tys[2]
);
}
}
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.type_i32();
let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
let ret_t = bx.type_void();
// Type of the vector of pointers:
let llvm_pointer_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
let llvm_elem_vec_ty = gcc_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic =
format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
);
let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
return Ok(v);
}
macro_rules! arith_red {
($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
$identity:expr) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.$integer_reduce(args[0].immediate());
if $ordered {
// if overflow occurs, the result is the
// mathematical result modulo 2^n:
Ok(bx.$op(args[1].immediate(), r))
} else {
Ok(bx.$integer_reduce(args[0].immediate()))
}
}
ty::Float(f) => {
let acc = if $ordered {
// ordered arithmetic reductions take an accumulator
args[1].immediate()
} else {
// unordered arithmetic reductions use the identity accumulator
match f.bit_width() {
32 => bx.const_real(bx.type_f32(), $identity),
64 => bx.const_real(bx.type_f64(), $identity),
v => return_error!(
r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
sym::$name,
in_ty,
in_elem,
v,
ret_ty
),
}
};
Ok(bx.$float_reduce(acc, args[0].immediate()))
}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
arith_red!(
simd_reduce_add_unordered: vector_reduce_add,
vector_reduce_fadd_fast,
false,
add,
0.0
);
arith_red!(
simd_reduce_mul_unordered: vector_reduce_mul,
vector_reduce_fmul_fast,
false,
mul,
1.0
);
macro_rules! minmax_red {
($name:ident: $int_red:ident, $float_red:ident) => {
if name == sym::$name {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
return match in_elem.kind() {
ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
macro_rules! bitwise_red {
($name:ident : $red:ident, $boolean:expr) => {
if name == sym::$name {
let input = if !$boolean {
require!(
ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem,
in_ty,
ret_ty
);
args[0].immediate()
} else {
match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
}
// boolean reductions operate on vectors of i1s:
let i1 = bx.type_i1();
let i1xn = bx.type_vector(i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
};
return match in_elem.kind() {
ty::Int(_) | ty::Uint(_) => {
let r = bx.$red(input);
Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
}
_ => return_error!(
"unsupported {} from `{}` with element `{}` to `{}`",
sym::$name,
in_ty,
in_elem,
ret_ty
),
};
}
};
}
bitwise_red!(simd_reduce_and: vector_reduce_and, false);
bitwise_red!(simd_reduce_or: vector_reduce_or, false);
bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
bitwise_red!(simd_reduce_all: vector_reduce_and, true);
bitwise_red!(simd_reduce_any: vector_reduce_or, true);
if name == sym::simd_cast {
require_simd!(ret_ty, "return");
let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
require!(
in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len,
in_ty,
ret_ty,
out_len
);
// casting cares about nominal type, not just structural type
if in_elem == out_elem {
return Ok(args[0].immediate());
}
enum Style {
Float,
Int(/* is signed? */ bool),
Unsupported,
}
let (in_style, in_width) = match in_elem.kind() {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
let (out_style, out_width) = match out_elem.kind() {
ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::Float(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0),
};
match (in_style, out_style) {
(Style::Int(in_is_signed), Style::Int(_)) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => {
if in_is_signed {
bx.sext(args[0].immediate(), llret_ty)
} else {
bx.zext(args[0].immediate(), llret_ty)
}
}
});
}
(Style::Int(in_is_signed), Style::Float) => {
return Ok(if in_is_signed {
bx.sitofp(args[0].immediate(), llret_ty)
} else {
bx.uitofp(args[0].immediate(), llret_ty)
});
}
(Style::Float, Style::Int(out_is_signed)) => {
return Ok(if out_is_signed {
bx.fptosi(args[0].immediate(), llret_ty)
} else {
bx.fptoui(args[0].immediate(), llret_ty)
});
}
(Style::Float, Style::Float) => {
return Ok(match in_width.cmp(&out_width) {
Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
Ordering::Equal => args[0].immediate(),
Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
});
}
_ => { /* Unsupported. Fallthrough. */ }
}
require!(
false,
"unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
in_ty,
in_elem,
ret_ty,
out_elem
);
}*/
macro_rules! arith_binary { macro_rules! arith_binary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => { ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name { $(if name == sym::$name {
@ -934,68 +159,9 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
simd_shl: Uint, Int => shl; simd_shl: Uint, Int => shl;
simd_shr: Uint => lshr, Int => ashr; simd_shr: Uint => lshr, Int => ashr;
simd_and: Uint, Int => and; simd_and: Uint, Int => and;
simd_or: Uint, Int => or; // FIXME: calling or might not work on vectors. simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
simd_xor: Uint, Int => xor; simd_xor: Uint, Int => xor;
/*simd_fmax: Float => maxnum;
simd_fmin: Float => minnum;*/
} }
/*macro_rules! arith_unary {
($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
$(if name == sym::$name {
match in_elem.kind() {
$($(ty::$p(_))|* => {
return Ok(bx.$call(args[0].immediate()))
})*
_ => {},
}
require!(false,
"unsupported operation on `{}` with element `{}`",
in_ty,
in_elem)
})*
}
}
arith_unary! {
simd_neg: Int => neg, Float => fneg;
}
if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
let lhs = args[0].immediate();
let rhs = args[1].immediate();
let is_add = name == sym::simd_saturating_add;
let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
let (signed, elem_width, elem_ty) = match *in_elem.kind() {
ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
_ => {
return_error!(
"expected element type `{}` of vector type `{}` \
to be a signed or unsigned integer type",
arg_tys[0].simd_size_and_type(bx.tcx()).1,
arg_tys[0]
);
}
};
let llvm_intrinsic = &format!(
"llvm.{}{}.sat.v{}i{}",
if signed { 's' } else { 'u' },
if is_add { "add" } else { "sub" },
in_len,
elem_width
);
let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
let f = bx.declare_cfn(
&llvm_intrinsic,
bx.type_func(&[vec_ty, vec_ty], vec_ty),
);
let v = bx.call(f, &[lhs, rhs], None);
return Ok(v);
}*/
unimplemented!("simd {}", name); unimplemented!("simd {}", name);
//span_bug!(span, "unknown SIMD intrinsic");
} }
View File
@ -1,10 +1,8 @@
/* /*
* TODO: support #[inline] attributes. * TODO(antoyo): support #[inline] attributes.
* TODO: support LTO. * TODO(antoyo): support LTO.
* *
* TODO: remove the local gccjit LD_LIBRARY_PATH in config.sh. * TODO(antoyo): remove the patches.
* TODO: remove the object dependency.
* TODO: remove the patches.
*/ */
#![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)] #![feature(rustc_private, decl_macro, associated_type_bounds, never_type, trusted_len)]
@ -13,13 +11,10 @@
#![warn(rust_2018_idioms)] #![warn(rust_2018_idioms)]
#![warn(unused_lifetimes)] #![warn(unused_lifetimes)]
/*extern crate flate2;
extern crate libc;*/
extern crate rustc_ast; extern crate rustc_ast;
extern crate rustc_codegen_ssa; extern crate rustc_codegen_ssa;
extern crate rustc_data_structures; extern crate rustc_data_structures;
extern crate rustc_errors; extern crate rustc_errors;
//extern crate rustc_fs_util;
extern crate rustc_hir; extern crate rustc_hir;
extern crate rustc_metadata; extern crate rustc_metadata;
extern crate rustc_middle; extern crate rustc_middle;
@ -53,7 +48,6 @@ mod mangled_std_symbols;
mod mono_item; mod mono_item;
mod type_; mod type_;
mod type_of; mod type_of;
mod va_arg;
use std::any::Any; use std::any::Any;
use std::sync::Arc; use std::sync::Arc;
@ -119,7 +113,7 @@ impl CodegenBackend for GccCodegenBackend {
fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> { fn link(&self, sess: &Session, mut codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorReported> {
use rustc_codegen_ssa::back::link::link_binary; use rustc_codegen_ssa::back::link::link_binary;
if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) { if let Some(symbols) = codegen_results.crate_info.exported_symbols.get_mut(&CrateType::Dylib) {
// TODO: remove when global initializer work without calling a function at runtime. // TODO(antoyo): remove when global initializers work without calling a function at runtime.
// HACK: since this codegen adds some symbols (e.g. __gccGlobalCrateInit) and the UI // HACK: since this codegen adds some symbols (e.g. __gccGlobalCrateInit) and the UI
// tests load libstd.so as a dynamic library, and rustc uses a version-script to specify // tests load libstd.so as a dynamic library, and rustc uses a version-script to specify
// the symbols' visibility, we add * to export all symbols. // the symbols' visibility, we add * to export all symbols.
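// Illustrative shape of such a version script (assumed for exposition, not
// taken from rustc's sources): a single wildcard in the global section
// exports every symbol:
//     { global: *; };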
@ -159,7 +153,7 @@ impl ExtraBackendMethods for GccCodegenBackend {
} }
fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> { fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel) -> TargetMachineFactoryFn<Self> {
// TODO: set opt level. // TODO(antoyo): set opt level.
Arc::new(|_| { Arc::new(|_| {
Ok(()) Ok(())
}) })
@ -171,8 +165,7 @@ impl ExtraBackendMethods for GccCodegenBackend {
fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> { fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
None None
// TODO // TODO(antoyo)
//llvm_util::tune_cpu(sess)
} }
} }
@ -197,7 +190,7 @@ pub struct GccContext {
} }
unsafe impl Send for GccContext {} unsafe impl Send for GccContext {}
// FIXME: that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here. // FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
unsafe impl Sync for GccContext {} unsafe impl Sync for GccContext {}
impl WriteBackendMethods for GccCodegenBackend { impl WriteBackendMethods for GccCodegenBackend {
@ -209,16 +202,13 @@ impl WriteBackendMethods for GccCodegenBackend {
type ThinBuffer = ThinBuffer; type ThinBuffer = ThinBuffer;
fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> { fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
// TODO: implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins. // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
// NOTE: implemented elsewhere. // NOTE: implemented elsewhere.
let module = let module =
match modules.remove(0) { match modules.remove(0) {
FatLTOInput::InMemory(module) => module, FatLTOInput::InMemory(module) => module,
FatLTOInput::Serialized { .. } => { FatLTOInput::Serialized { .. } => {
unimplemented!(); unimplemented!();
/*info!("pushing serialized module {:?}", name);
let buffer = SerializedModule::Local(buffer);
serialized_modules.push((buffer, CString::new(name).unwrap()));*/
} }
}; };
Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] }) Ok(LtoModuleCodegen::Fat { module: Some(module), _serialized_bitcode: vec![] })
@ -233,9 +223,6 @@ impl WriteBackendMethods for GccCodegenBackend {
} }
unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> { unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
//if cgcx.lto == Lto::Fat {
//module.module_llvm.context.add_driver_option("-flto");
//}
module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
Ok(()) Ok(())
} }
@ -257,7 +244,7 @@ impl WriteBackendMethods for GccCodegenBackend {
} }
fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> { fn run_lto_pass_manager(_cgcx: &CodegenContext<Self>, _module: &ModuleCodegen<Self::Module>, _config: &ModuleConfig, _thin: bool) -> Result<(), FatalError> {
// TODO // TODO(antoyo)
Ok(()) Ok(())
} }
@ -266,10 +253,6 @@ impl WriteBackendMethods for GccCodegenBackend {
} }
} }
/*fn target_triple(sess: &Session) -> target_lexicon::Triple {
sess.target.llvm_target.parse().unwrap()
}*/
/// This is the entrypoint for a hot-plugged rustc_codegen_gccjit /// This is the entrypoint for a hot-plugged rustc_codegen_gccjit
#[no_mangle] #[no_mangle]
pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> { pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
@ -306,11 +289,6 @@ fn handle_native(name: &str) -> &str {
} }
unimplemented!(); unimplemented!();
/*unsafe {
let mut len = 0;
let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
}*/
} }
pub fn target_cpu(sess: &Session) -> &str { pub fn target_cpu(sess: &Session) -> &str {
@ -327,14 +305,7 @@ pub fn target_features(sess: &Session) -> Vec<Symbol> {
}, },
) )
.filter(|_feature| { .filter(|_feature| {
/*if feature.starts_with("sse") { // TODO(antoyo): implement a way to get enabled feature in libgccjit.
return true;
}*/
// TODO: implement a way to get enabled feature in libgccjit.
//println!("Feature: {}", feature);
/*let llvm_feature = to_llvm_feature(sess, feature);
let cstr = CString::new(llvm_feature).unwrap();
unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }*/
false false
}) })
.map(|feature| Symbol::intern(feature)) .map(|feature| Symbol::intern(feature))
View File
@ -26,12 +26,7 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
) )
}); });
// TODO // TODO(antoyo): set linkage and visibility.
/*unsafe {
llvm::LLVMRustSetLinkage(global, base::linkage_to_llvm(linkage));
llvm::LLVMRustSetVisibility(global, base::visibility_to_llvm(visibility));
}*/
self.instances.borrow_mut().insert(instance, global); self.instances.borrow_mut().insert(instance, global);
} }
@ -43,17 +38,8 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let _decl = self.declare_fn(symbol_name, &fn_abi); let _decl = self.declare_fn(symbol_name, &fn_abi);
//let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
// TODO: call set_link_section() to allow initializing argc/argv. // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
//base::set_link_section(decl, &attrs); // TODO(antoyo): set unique comdat.
/*if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR { // TODO(antoyo): use inline attribute from there in linkage.set() above.
llvm::SetUniqueComdat(self.llmod, decl);
}*/
//debug!("predefine_fn: instance = {:?}", instance);
// TODO: use inline attribute from there in linkage.set() above:
//attributes::from_fn_attrs(self, decl, instance);
//self.instances.borrow_mut().insert(instance, decl);
} }
} }
View File
@ -14,6 +14,9 @@ use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> { pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
// gcc only supports 1, 2, 4 or 8-byte integers. // gcc only supports 1, 2, 4 or 8-byte integers.
// FIXME(antoyo): it is misleading to use the next power of two here, as
// rustc_codegen_ssa sometimes uses 96-bit numbers and the following code will
// give an integer of a different size.
let bytes = (num_bits / 8).next_power_of_two() as i32; let bytes = (num_bits / 8).next_power_of_two() as i32;
match bytes { match bytes {
1 => self.i8_type, 1 => self.i8_type,
@ -23,17 +26,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
16 => self.i128_type, 16 => self.i128_type,
_ => panic!("unexpected num_bits: {}", num_bits), _ => panic!("unexpected num_bits: {}", num_bits),
} }
/*
let bytes = (num_bits / 8).next_power_of_two() as i32;
println!("num_bits: {}, bytes: {}", num_bits, bytes);
self.context.new_int_type(bytes, true) // TODO: check if it is indeed a signed integer.
*/
} }
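// Worked instance of the FIXME above (illustrative arithmetic, not crate code):
fn bytes_for(num_bits: u64) -> u64 {
    (num_bits / 8).next_power_of_two()
}
// bytes_for(96) == 16, so a 96-bit request silently comes back as the 128-bit type.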
/*pub fn type_bool(&self) -> Type<'gcc> {
self.bool_type
}*/
pub fn type_void(&self) -> Type<'gcc> { pub fn type_void(&self) -> Type<'gcc> {
self.context.new_type::<()>() self.context.new_type::<()>()
} }
@ -67,39 +61,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
let ity = Integer::approximate_align(self, align); let ity = Integer::approximate_align(self, align);
self.type_from_integer(ity) self.type_from_integer(ity)
} }
/*pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
match t {
ty::IntTy::Isize => self.type_isize(),
ty::IntTy::I8 => self.type_i8(),
ty::IntTy::I16 => self.type_i16(),
ty::IntTy::I32 => self.type_i32(),
ty::IntTy::I64 => self.type_i64(),
ty::IntTy::I128 => self.type_i128(),
}
}
pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
match t {
ty::UintTy::Usize => self.type_isize(),
ty::UintTy::U8 => self.type_i8(),
ty::UintTy::U16 => self.type_i16(),
ty::UintTy::U32 => self.type_i32(),
ty::UintTy::U64 => self.type_i64(),
ty::UintTy::U128 => self.type_i128(),
}
}
pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
match t {
ty::FloatTy::F32 => self.type_f32(),
ty::FloatTy::F64 => self.type_f64(),
}
}
pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
self.context.new_vector_type(ty, len)
}*/
} }
impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
@ -151,9 +112,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
let fields: Vec<_> = fields.iter().enumerate() let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index))) .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
.collect(); .collect();
// TODO: use packed. // TODO(antoyo): use packed.
//let name = types.iter().map(|typ| format!("{:?}", typ)).collect::<Vec<_>>().join("_");
//let typ = self.context.new_struct_type(None, format!("struct{}", name), &fields).as_type();
let typ = self.context.new_struct_type(None, "struct", &fields).as_type(); let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
self.struct_types.borrow_mut().insert(types, typ); self.struct_types.borrow_mut().insert(types, typ);
typ typ
@ -167,21 +126,17 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
TypeKind::Vector TypeKind::Vector
} }
else { else {
// TODO // TODO(antoyo): support other types.
TypeKind::Void TypeKind::Void
} }
} }
fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> { fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
// TODO
/*assert_ne!(self.type_kind(ty), TypeKind::Function,
"don't call ptr_to on function types, use ptr_to_gcc_type on FnAbi instead"
);*/
ty.make_pointer() ty.make_pointer()
} }
fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> { fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
// TODO: use address_space // TODO(antoyo): use address_space
ty.make_pointer() ty.make_pointer()
} }
@ -202,7 +157,6 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn vector_length(&self, _ty: Type<'gcc>) -> usize { fn vector_length(&self, _ty: Type<'gcc>) -> usize {
unimplemented!(); unimplemented!();
//unsafe { llvm::LLVMGetVectorSize(ty) as usize }
} }
fn float_width(&self, typ: Type<'gcc>) -> usize { fn float_width(&self, typ: Type<'gcc>) -> usize {
@ -217,14 +171,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
else { else {
panic!("Cannot get width of float type {:?}", typ); panic!("Cannot get width of float type {:?}", typ);
} }
// TODO: support other sizes. // TODO(antoyo): support other sizes.
/*match self.type_kind(ty) {
TypeKind::Float => 32,
TypeKind::Double => 64,
TypeKind::X86_FP80 => 80,
TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
_ => bug!("llvm_float_width called on a non-float type"),
}*/
} }
fn int_width(&self, typ: Type<'gcc>) -> u64 { fn int_width(&self, typ: Type<'gcc>) -> u64 {
@ -263,21 +210,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) { pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], _packed: bool) {
// TODO: use packed. // TODO(antoyo): use packed.
let fields: Vec<_> = fields.iter().enumerate() let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index))) .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
.collect(); .collect();
typ.set_fields(None, &fields); typ.set_fields(None, &fields);
} }
/*fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
// TODO: use packed.
let fields: Vec<_> = fields.iter().enumerate()
.map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
.collect();
return self.context.new_struct_type(None, "unnamedStruct", &fields).as_type();
}*/
pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> { pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
self.context.new_opaque_struct_type(None, name) self.context.new_opaque_struct_type(None, name)
} }
@ -288,7 +227,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
// NOTE: since gccjit only supports i32 for the array size and libcore's tests use a // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
// size of usize::MAX in test_binary_search, we work around this by setting the size to // size of usize::MAX in test_binary_search, we work around this by setting the size to
// zero for ZSTs. // zero for ZSTs.
// FIXME: fix gccjit API. // FIXME(antoyo): fix gccjit API.
len = 0; len = 0;
} }
} }
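// Why the ZST workaround above is sound (an illustrative check, not crate code):
// an array of zero-sized elements occupies no storage regardless of its length,
// so length 0 describes the same layout as usize::MAX would.
const _: () = assert!(std::mem::size_of::<[(); 1000]>() == 0);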
@ -305,7 +244,6 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
} }
pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) { pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
//debug!("struct_fields: {:#?}", layout);
let field_count = layout.fields.count(); let field_count = layout.fields.count();
let mut packed = false; let mut packed = false;
@ -319,23 +257,13 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset); layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
packed |= effective_field_align < field.align.abi; packed |= effective_field_align < field.align.abi;
/*debug!(
"struct_fields: {}: {:?} offset: {:?} target_offset: {:?} \
effective_field_align: {}",
i,
field,
offset,
target_offset,
effective_field_align.bytes()
);*/
assert!(target_offset >= offset); assert!(target_offset >= offset);
let padding = target_offset - offset; let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align); let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.align_to(padding_align) + padding, target_offset); assert_eq!(offset.align_to(padding_align) + padding, target_offset);
result.push(cx.type_padding_filler(padding, padding_align)); result.push(cx.type_padding_filler(padding, padding_align));
//debug!(" padding before: {:?}", padding);
result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME: might need to check if the type is inside another, like Box<Type>. result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box<Type>.
offset = target_offset + field.size; offset = target_offset + field.size;
prev_effective_align = effective_field_align; prev_effective_align = effective_field_align;
} }
@ -346,14 +274,8 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
let padding = layout.size - offset; let padding = layout.size - offset;
let padding_align = prev_effective_align; let padding_align = prev_effective_align;
assert_eq!(offset.align_to(padding_align) + padding, layout.size); assert_eq!(offset.align_to(padding_align) + padding, layout.size);
/*debug!(
"struct_fields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, layout.size
);*/
result.push(cx.type_padding_filler(padding, padding_align)); result.push(cx.type_padding_filler(padding, padding_align));
assert_eq!(result.len(), 1 + field_count * 2); assert_eq!(result.len(), 1 + field_count * 2);
} else {
//debug!("struct_fields: offset: {:?} stride: {:?}", offset, layout.size);
} }
(result, packed) (result, packed)
View File
@ -71,7 +71,7 @@ pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLa
// If `Some` is returned then a named struct is created in LLVM. Name collisions are // If `Some` is returned then a named struct is created in LLVM. Name collisions are
// avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
// can improve perf. // can improve perf.
// FIXME: I don't think that's true for libgccjit. // FIXME(antoyo): I don't think that's true for libgccjit.
Some(String::new()) Some(String::new())
} }
_ => None, _ => None,
@ -144,6 +144,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
/// with the inner-most trailing unsized field using the "minimal unit" /// with the inner-most trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of /// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment. /// that field and ensuring the struct has the right alignment.
// TODO(antoyo): do we still need the set_fields parameter?
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> { fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
if let Abi::Scalar(ref scalar) = self.abi { if let Abi::Scalar(ref scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs // Use a different cache for scalars because pointers to DSTs
@ -184,8 +185,6 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
return ty; return ty;
} }
//debug!("gcc_type({:#?})", self);
assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty); assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
// Make sure lifetimes are erased, to avoid generating distinct LLVM // Make sure lifetimes are erased, to avoid generating distinct LLVM
@ -204,22 +203,12 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
else { else {
uncached_gcc_type(cx, *self, &mut defer) uncached_gcc_type(cx, *self, &mut defer)
}; };
//debug!("--> mapped {:#?} to ty={:?}", self, ty);
cx.types.borrow_mut().insert((self.ty, variant_index), ty); cx.types.borrow_mut().insert((self.ty, variant_index), ty);
if let Some((ty, layout)) = defer { if let Some((ty, layout)) = defer {
//TODO: do we still need this conditions and the set_fields parameter?
//if set_fields {
let (fields, packed) = struct_fields(cx, layout); let (fields, packed) = struct_fields(cx, layout);
cx.set_struct_body(ty, &fields, packed); cx.set_struct_body(ty, &fields, packed);
/*}
else {
// Since we might be trying to generate a type containing another type which is not
// completely generated yet, we don't set the fields right now, but we save the
// type to set the fields later.
cx.types_with_fields_to_set.borrow_mut().insert(ty.as_type(), (ty, layout));
}*/
} }
ty ty
@ -255,7 +244,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
} }
fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> { fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
// TODO: remove llvm hack: // TODO(antoyo): remove llvm hack:
// HACK(eddyb) special-case fat pointers until LLVM removes // HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`. // pointee types, to avoid bitcasting every `OperandRef::deref`.
match self.ty.kind() { match self.ty.kind() {
@ -281,8 +270,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
// immediate, just like `bool` is typically `i8` in memory and only `i1` // immediate, just like `bool` is typically `i8` in memory and only `i1`
// when immediate. We need to load/store `bool` as `i8` to avoid // when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`. // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
// TODO: this bugs certainly don't happen in this case since the bool type is used instead of i1. // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
if /*immediate &&*/ scalar.is_bool() { if scalar.is_bool() {
return cx.type_i1(); return cx.type_i1();
} }
@ -361,12 +350,10 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> { fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
unimplemented!(); unimplemented!();
//ty.gcc_type(self)
} }
fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
// FIXME: return correct type. // FIXME(antoyo): return correct type.
self.type_void() self.type_void()
//fn_abi.gcc_type(self)
} }
} }
View File
@ -1,179 +0,0 @@
/*use gccjit::{RValue, ToRValue, Type};
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::{
common::IntPredicate,
traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
};
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::Ty;
use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size};
use crate::builder::Builder;
use crate::type_of::LayoutGccExt;
fn round_pointer_up_to_alignment<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: RValue<'gcc>, align: Align, ptr_ty: Type<'gcc>) -> RValue<'gcc> {
let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
bx.inttoptr(ptr_as_int, ptr_ty)
}
fn emit_direct_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, llty: Type<'gcc>, size: Size, align: Align, slot_size: Align, allow_higher_align: bool) -> (RValue<'gcc>, Align) {
let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
let va_list_addr =
if list.layout.gcc_type(bx.cx, true) != va_list_ptr_ty {
bx.bitcast(list.immediate(), va_list_ptr_ty)
}
else {
list.immediate()
};
let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
let (addr, addr_align) = if allow_higher_align && align > slot_size {
(round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
} else {
(ptr, slot_size)
};
let aligned_size = size.align_to(slot_size).bytes() as i32;
let full_direct_size = bx.cx().const_i32(aligned_size);
let next = bx.inbounds_gep(addr, &[full_direct_size]);
bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
(bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
} else {
(bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
}
}
fn emit_ptr_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>, indirect: bool, slot_size: Align, allow_higher_align: bool) -> RValue<'gcc> {
let layout = bx.cx.layout_of(target_ty);
let (llty, size, align) =
if indirect {
(
bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).gcc_type(bx.cx, true),
bx.cx.data_layout().pointer_size,
bx.cx.data_layout().pointer_align,
)
}
else {
(layout.gcc_type(bx.cx, true), layout.size, layout.align)
};
let (addr, addr_align) = emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
if indirect {
let tmp_ret = bx.load(addr, addr_align);
bx.load(tmp_ret, align.abi)
}
else {
bx.load(addr, addr_align)
}
}
fn emit_aapcs_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, list: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> {
// Implementation of the AAPCS64 calling convention for va_args; see
// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
let va_list_addr = list.immediate();
let layout = bx.cx.layout_of(target_ty);
let gcc_type = layout.immediate_gcc_type(bx);
let function = bx.llbb().get_function();
let variable = function.new_local(None, gcc_type, "va_arg");
let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
let end = bx.build_sibling_block("va_arg.end");
let zero = bx.const_i32(0);
let offset_align = Align::from_bytes(4).unwrap();
assert!(bx.tcx().sess.target.endian == Endian::Little);
let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
let (reg_off, reg_top_index, slot_size) = if gr_type {
let gr_offs = bx.struct_gep(va_list_addr, 7);
let nreg = (layout.size.bytes() + 7) / 8;
(gr_offs, 3, nreg * 8)
} else {
let vr_off = bx.struct_gep(va_list_addr, 9);
let nreg = (layout.size.bytes() + 15) / 16;
(vr_off, 5, nreg * 16)
};
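// These struct_gep indices are assumed to address the __gr_offs/__vr_offs
// counters and (below) the __gr_top/__vr_top save-area pointers of the AAPCS64
// va_list record; the exact field numbers depend on its GCC type layout.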
// If the offset is >= 0, the value will be on the stack.
let mut reg_off_v = bx.load(reg_off, offset_align);
let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb());
// The value at this point might be in a register, but there is a chance that
// it could be on the stack, so we have to update the offset and then check
// it again.
if gr_type && layout.align.abi.bytes() > 8 {
reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
}
let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
maybe_reg.store(new_reg_off_v, reg_off, offset_align);
// Check whether this advance overflowed the register save area: if the new
// offset is positive, the value does not fit in a register and must be
// taken from the stack instead.
let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb());
let top = in_reg.struct_gep(va_list_addr, reg_top_index);
let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
// reg_value = *(@top + reg_off_v);
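// (The top pointer addresses the end of the register save area and reg_off_v
// is negative here, so the GEP indexes back into the saved registers.)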
let top = in_reg.gep(top, &[reg_off_v]);
let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.gcc_type(bx, true)));
let reg_value = in_reg.load(top, layout.align.abi);
in_reg.assign(variable, reg_value);
in_reg.br(end.llbb());
// On Stack block
let stack_value =
emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
on_stack.assign(variable, stack_value);
on_stack.br(end.llbb());
*bx = end;
variable.to_rvalue()
}
pub(super) fn emit_va_arg<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, addr: OperandRef<'tcx, RValue<'gcc>>, target_ty: Ty<'tcx>) -> RValue<'gcc> {
// Determine the va_arg implementation to use. The LLVM va_arg instruction
// is lacking in some instances, so we should only use it as a fallback.
let target = &bx.cx.tcx.sess.target;
let arch = &bx.cx.tcx.sess.target.arch;
match &**arch {
// Windows x86
"x86" if target.options.is_like_windows => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
}
// Generic x86
"x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
// Windows AArch64
"aarch64" if target.options.is_like_windows => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
}
// macOS / iOS AArch64
"aarch64" if target.options.is_like_osx => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
}
"aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
// Windows x86_64
"x86_64" if target.options.is_like_windows => {
let target_ty_size = bx.cx.size_of(target_ty).bytes();
let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
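// Per the Windows x64 calling convention, arguments wider than 8 bytes or
// with a non-power-of-two size are passed by reference.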
emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
}
// For all other architecture/OS combinations, fall back to using
// the LLVM va_arg instruction.
// https://llvm.org/docs/LangRef.html#va-arg-instruction
_ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).gcc_type(bx.cx, true)),
}
}*/

View File

@ -1,8 +1,7 @@
#!/bin/bash #!/bin/bash
# TODO: rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed? # TODO(antoyo): rewrite to cargo-make (or just) or something like that to only rebuild the sysroot when needed?
#set -x
set -e set -e
export GCC_PATH=$(cat gcc_path) export GCC_PATH=$(cat gcc_path)
@ -30,17 +29,9 @@ $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --targ
echo "[BUILD] example" echo "[BUILD] example"
$RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
#echo "[JIT] mini_core_hello_world"
#CG_CLIF_JIT=1 CG_CLIF_JIT_ARGS="abc bcd" $RUSTC --crate-type bin -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target $HOST_TRIPLE
#else
#echo "[JIT] mini_core_hello_world (skipped)"
#fi
echo "[AOT] mini_core_hello_world" echo "[AOT] mini_core_hello_world"
$RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
$RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
# (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
echo "[BUILD] sysroot" echo "[BUILD] sysroot"
time ./build_sysroot/build_sysroot.sh time ./build_sysroot/build_sysroot.sh
@ -52,20 +43,12 @@ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
echo "[AOT] alloc_system" echo "[AOT] alloc_system"
$RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE" $RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
# FIXME: this requires linking an additional lib for __popcountdi2 echo "[AOT] alloc_example"
#echo "[AOT] alloc_example" $RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
#$RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE $RUN_WRAPPER ./target/out/alloc_example
#$RUN_WRAPPER ./target/out/alloc_example
#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
#echo "[JIT] std_example"
#CG_CLIF_JIT=1 $RUSTC --crate-type bin -Cprefer-dynamic example/std_example.rs --target $HOST_TRIPLE
#else
#echo "[JIT] std_example (skipped)"
#fi
echo "[AOT] dst_field_align" echo "[AOT] dst_field_align"
# FIXME Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed. # FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
$RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
$RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false) $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
@ -81,14 +64,14 @@ echo "[AOT] track-caller-attribute"
$RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
$RUN_WRAPPER ./target/out/track-caller-attribute $RUN_WRAPPER ./target/out/track-caller-attribute
# FIXME: this requires linking an additional lib for __popcountdi2 echo "[BUILD] mod_bench"
#echo "[BUILD] mod_bench" $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
#$RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
# FIXME linker gives multiple definitions error on Linux # FIXME(antoyo): linker gives multiple definitions error on Linux
#echo "[BUILD] sysroot in release mode" #echo "[BUILD] sysroot in release mode"
#./build_sysroot/build_sysroot.sh --release #./build_sysroot/build_sysroot.sh --release
# TODO(antoyo): uncomment when it works.
#pushd simple-raytracer #pushd simple-raytracer
#if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then #if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
#echo "[BENCH COMPILE] ebobby/simple-raytracer" #echo "[BENCH COMPILE] ebobby/simple-raytracer"
@ -113,6 +96,7 @@ rm -r ./target || true
../../../../../cargo.sh test ../../../../../cargo.sh test
popd popd
# TODO(antoyo): uncomment when it works.
#pushd regex #pushd regex
#echo "[TEST] rust-lang/regex example shootout-regex-dna" #echo "[TEST] rust-lang/regex example shootout-regex-dna"
#../cargo.sh clean #../cargo.sh clean
@ -152,9 +136,6 @@ git fetch
git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
export RUSTFLAGS= export RUSTFLAGS=
#git apply ../rust_lang.patch
rm config.toml || true rm config.toml || true
cat > config.toml <<EOF cat > config.toml <<EOF
@ -182,16 +163,9 @@ for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src
done done
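# The loop above filters out tests that depend on unwinding (catch_unwind,
# should_panic), threads, or LTO, since this backend does not support them yet.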
git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO: Enable back this test if I ever implement the llvm_asm! macro. rm src/test/ui/llvm-asm/llvm-asm-in-out-operand.rs || true # TODO(antoyo): Re-enable this test if I ever implement the llvm_asm! macro.
#rm src/test/ui/consts/const-size_of-cycle.rs || true # Error file path difference
#rm src/test/ui/impl-trait/impl-generic-mismatch.rs || true # ^
#rm src/test/ui/type_length_limit.rs || true
#rm src/test/ui/issues/issue-50993.rs || true # Target `thumbv7em-none-eabihf` is not supported
#rm src/test/ui/macros/same-sequence-span.rs || true # Proc macro .rustc section not found?
#rm src/test/ui/suggestions/issue-61963.rs || true # ^
RUSTC_ARGS="-Zpanic-abort-tests -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort" RUSTC_ARGS="-Zpanic-abort-tests -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
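# These flags run the suite against this backend's dylib and its custom sysroot;
# -Cpanic=abort matches the abort-only panic strategy used throughout this repo.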
echo "[TEST] rustc test suite" echo "[TEST] rustc test suite"
# TODO: remove excluded tests when they stop stalling. COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS"
COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS" --exclude src/test/ui/numbers-arithmetic/saturating-float-casts.rs --exclude src/test/ui/issues/issue-50811.rs