Mirror of https://github.com/rust-lang/rust.git (synced 2025-02-20 10:55:14 +00:00)
fix(fmt/style): Remove unnecessary clones, into's and deref's
parent 47207949ae
commit 6f76488b2f
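Every hunk below is a mechanical cleanup of one of the three patterns named in the title: a `.clone()` on a type that is `Copy`, an `.into_iter()`/`.into()` conversion on something that already is what it needs to be, and a borrow or deref (`&self`, `&*x`, `&format!(...)`) that is immediately coerced or re-borrowed anyway. A rough, self-contained sketch of the three shapes, using invented names rather than code from this patch:

    // Illustrative only; `Location` and `describe` are stand-ins, not items from this repository.
    #[derive(Clone, Copy, Debug)]
    struct Location {
        line: u32,
    }

    fn describe(name: &str) -> String {
        format!("item {}", name)
    }

    fn main() {
        let loc = Location { line: 7 };

        // Unnecessary clone: `Location` is `Copy`, so `loc.clone()` is just `loc`.
        let copied = loc;

        // Unnecessary `.into_iter()`: a range is already an iterator, so
        // `(0..4).into_iter().map(...)` can drop the conversion.
        let squares: Vec<u32> = (0..4).map(|i| i * i).collect();

        // Unnecessary borrow/deref: `describe(&*name)` compiles, but `&name`
        // coerces from `&String` to `&str` without the extra deref.
        let name = String::from("example");
        let described = describe(&name);

        println!("{} {:?} {}", copied.line, squares, described);
    }

The gccjit handle types touched in the hunks (`Location`, `Type`, `RValue`, `Block`) are treated the same way: the `self.location.clone()` -> `self.location` and `self.clone()` -> `*self` changes below only work because those types are `Copy`.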
@@ -92,7 +92,7 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
     let mut function_features = function_features
         .iter()
         .flat_map(|feat| to_gcc_features(cx.tcx.sess, feat).into_iter())
-        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+        .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match *x {
             InstructionSetAttr::ArmA32 => "-thumb-mode", // TODO(antoyo): support removing feature.
             InstructionSetAttr::ArmT32 => "thumb-mode",
         }))
@@ -118,8 +118,8 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
 
             if feature.starts_with('-') {
                 Some(format!("no{}", feature))
-            } else if feature.starts_with('+') {
-                Some(feature[1..].to_string())
+            } else if let Some(stripped) = feature.strip_prefix('+') {
+                Some(stripped.to_string())
             } else {
                 Some(feature.to_string())
             }
@@ -128,8 +128,7 @@ fn prepare_lto(
     }
 
     let archive_data = unsafe {
-        Mmap::map(File::open(&path).expect("couldn't open rlib"))
-            .expect("couldn't map rlib")
+        Mmap::map(File::open(path).expect("couldn't open rlib")).expect("couldn't map rlib")
     };
     let archive = ArchiveFile::parse(&*archive_data).expect("wanted an rlib");
     let obj_files = archive
@@ -104,7 +104,7 @@ pub(crate) unsafe fn codegen(
             // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
             // transmuting an rvalue to an lvalue.
             // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
-            context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
+            context.dump_reproducer_to_file(format!("/tmp/reproducers/{}.c", module.name));
             println!("Dumped reproducer {}", module.name);
         }
         if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
@@ -135,7 +135,7 @@ pub fn compile_codegen_unit(
 
         let target_cpu = gcc_util::target_cpu(tcx.sess);
         if target_cpu != "generic" {
-            context.add_command_line_option(&format!("-march={}", target_cpu));
+            context.add_command_line_option(format!("-march={}", target_cpu));
         }
 
         if tcx
@@ -277,8 +277,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             .collect();
 
         // NOTE: to take into account variadic functions.
-        for i in casted_args.len()..args.len() {
-            casted_args.push(args[i]);
+        for arg in args.iter().skip(casted_args.len()) {
+            casted_args.push(*arg);
         }
 
         Cow::Owned(casted_args)
@@ -353,7 +353,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let function_address_names = self.function_address_names.borrow();
             let original_function_name = function_address_names.get(&func_ptr);
             llvm::adjust_intrinsic_arguments(
-                &self,
+                self,
                 gcc_func,
                 args.into(),
                 &func_name,
@@ -361,7 +361,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             )
         };
         let args_adjusted = args.len() != previous_arg_count;
-        let args = self.check_ptr_call("call", func_ptr, &*args);
+        let args = self.check_ptr_call("call", func_ptr, &args);
 
         // gccjit requires to use the result of functions, even when it's not used.
         // That's why we assign the result to a local or call add_eval().
@@ -373,7 +373,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             unsafe { RETURN_VALUE_COUNT += 1 };
             let return_value = self.cx.context.new_call_through_ptr(self.location, func_ptr, &args);
             let return_value = llvm::adjust_intrinsic_return_value(
-                &self,
+                self,
                 return_value,
                 &func_name,
                 &args,
@@ -441,7 +441,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             self.block.add_assignment(
                 self.location,
                 result,
-                self.cx.context.new_call(self.location, func, &args),
+                self.cx.context.new_call(self.location, func, args),
             );
             result.to_rvalue()
         }
@@ -595,7 +595,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     ) -> RValue<'gcc> {
         let try_block = self.current_func().new_block("try");
 
-        let current_block = self.block.clone();
+        let current_block = self.block;
         self.block = try_block;
         let call = self.call(typ, fn_attrs, None, func, args, None); // TODO(antoyo): use funclet here?
         self.block = current_block;
@@ -1176,7 +1176,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // NOTE: due to opaque pointers now being used, we need to cast here.
         let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
         // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
-        let mut indices = indices.into_iter();
+        let mut indices = indices.iter();
         let index = indices.next().expect("first index in inbounds_gep");
         let mut result = self.context.new_array_access(self.location, ptr, *index);
         for index in indices {
@@ -1684,7 +1684,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
         // FIXME(antoyo): this does not zero-extend.
-        if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
+        if value.get_type().is_bool() && dest_typ.is_i8(self.cx) {
             // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
             // Fix the code in codegen_ssa::base::from_immediate.
             return value;
@@ -2057,7 +2057,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
             let shifted = self.context.new_rvalue_vector_perm(self.location, res, res, mask);
             shift *= 2;
-            res = op(res, shifted, &self.context);
+            res = op(res, shifted, self.context);
         }
         self.context
             .new_vector_access(self.location, res, self.context.new_rvalue_zero(self.int_type))
@@ -2073,7 +2073,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 
     pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
-        let loc = self.location.clone();
+        let loc = self.location;
         self.vector_reduce(src, |a, b, context| context.new_binary_op(loc, op, a.get_type(), a, b))
     }
 
@@ -2090,7 +2090,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
         (0..element_count)
-            .into_iter()
             .map(|i| {
                 self.context
                     .new_vector_access(
@@ -2121,7 +2120,6 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
         (0..element_count)
-            .into_iter()
             .map(|i| {
                 self.context
                     .new_vector_access(
@@ -2141,7 +2139,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
     // Inspired by Hacker's Delight min implementation.
     pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
-        let loc = self.location.clone();
+        let loc = self.location;
         self.vector_reduce(src, |a, b, context| {
             let differences_or_zeros = difference_or_zero(loc, a, b, context);
             context.new_binary_op(loc, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
@@ -2150,7 +2148,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
     // Inspired by Hacker's Delight max implementation.
     pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
-        let loc = self.location.clone();
+        let loc = self.location;
         self.vector_reduce(src, |a, b, context| {
             let differences_or_zeros = difference_or_zero(loc, a, b, context);
             context.new_binary_op(loc, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
@@ -2345,7 +2343,7 @@ impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
 
 impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
     fn target_spec(&self) -> &Target {
-        &self.cx.target_spec()
+        self.cx.target_spec()
     }
 }
 
@@ -28,7 +28,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
 
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
 
-    let func = if let Some(_func) = cx.get_declared_value(&sym) {
+    let func = if let Some(_func) = cx.get_declared_value(sym) {
         // FIXME(antoyo): we never reach this because get_declared_value only returns global variables
         // and here we try to get a function.
         unreachable!();
@@ -68,7 +68,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
         }*/
     } else {
         cx.linkage.set(FunctionType::Extern);
-        let func = cx.declare_fn(&sym, &fn_abi);
+        let func = cx.declare_fn(sym, fn_abi);
 
         attributes::from_fn_attrs(cx, func, instance);
 
|
@ -21,7 +21,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
|
||||
|
||||
fn global_string(&self, string: &str) -> LValue<'gcc> {
|
||||
// TODO(antoyo): handle non-null-terminated strings.
|
||||
let string = self.context.new_string_literal(&*string);
|
||||
let string = self.context.new_string_literal(string);
|
||||
let sym = self.generate_local_symbol_name("str");
|
||||
let global = self.declare_private_global(&sym, self.val_ty(string));
|
||||
global.global_set_initializer_rvalue(string);
|
||||
@@ -170,7 +170,8 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                 return self
                     .context
                     .new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
-            } else if ty == self.double_type {
+            }
+            if ty == self.double_type {
                 return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
             }
 
@@ -293,7 +294,7 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
         } else if self.is_ulonglong(cx) {
             cx.longlong_type
         } else {
-            self.clone()
+            *self
         }
     }
 
@@ -319,7 +320,7 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
         } else if self.is_longlong(cx) {
             cx.ulonglong_type
         } else {
-            self.clone()
+            *self
         }
     }
 }
@@ -432,7 +433,7 @@ impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
     }
 
     fn is_vector(&self) -> bool {
-        let mut typ = self.clone();
+        let mut typ = *self;
         loop {
             if typ.dyncast_vector().is_some() {
                 return true;
@@ -66,7 +66,7 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
     fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
         let attrs = self.tcx.codegen_fn_attrs(def_id);
 
-        let value = match codegen_static_initializer(&self, def_id) {
+        let value = match codegen_static_initializer(self, def_id) {
             Ok((value, _)) => value,
             // Error has already been reported
             Err(_) => return,
@@ -231,13 +231,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             }
 
             let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
-            let global = self.declare_global(
-                &sym,
-                llty,
-                GlobalKind::Exported,
-                is_tls,
-                fn_attrs.link_section,
-            );
+            let global =
+                self.declare_global(sym, llty, GlobalKind::Exported, is_tls, fn_attrs.link_section);
 
             if !self.tcx.is_reachable_non_generic(def_id) {
                 #[cfg(feature = "master")]
@@ -246,7 +241,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
             global
         } else {
-            check_and_apply_linkage(&self, &fn_attrs, ty, sym)
+            check_and_apply_linkage(self, fn_attrs, ty, sym)
        };
 
         if !def_id.is_local() {
@@ -367,11 +362,8 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
     let gcc_type = cx.layout_of(ty).gcc_type(cx);
     if let Some(linkage) = attrs.import_linkage {
         // Declare a symbol `foo` with the desired linkage.
-        let global1 = cx.declare_global_with_linkage(
-            &sym,
-            cx.type_i8(),
-            base::global_linkage_to_gcc(linkage),
-        );
+        let global1 =
+            cx.declare_global_with_linkage(sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
 
         // Declare an internal global `extern_with_linkage_foo` which
         // is initialized with the address of `foo`. If `foo` is
|
||||
// `extern_with_linkage_foo` will instead be initialized to
|
||||
// zero.
|
||||
let mut real_name = "_rust_extern_with_linkage_".to_string();
|
||||
real_name.push_str(&sym);
|
||||
real_name.push_str(sym);
|
||||
let global2 = cx.define_global(&real_name, gcc_type, is_tls, attrs.link_section);
|
||||
// TODO(antoyo): set linkage.
|
||||
let value = cx.const_ptrcast(global1.get_address(None), gcc_type);
|
||||
@@ -397,6 +389,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>(
         // don't do this then linker errors can be generated where the linker
         // complains that one object files has a thread local version of the
         // symbol and another one doesn't.
-        cx.declare_global(&sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
+        cx.declare_global(sym, gcc_type, GlobalKind::Imported, is_tls, attrs.link_section)
     }
 }
@@ -384,7 +384,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     }
 
     pub fn sess(&self) -> &'tcx Session {
-        &self.tcx.sess
+        self.tcx.sess
     }
 
     pub fn bitcast_if_needed(
@@ -431,7 +431,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let func_name = self.tcx.symbol_name(instance).name;
 
         let func = if self.intrinsics.borrow().contains_key(func_name) {
-            self.intrinsics.borrow()[func_name].clone()
+            self.intrinsics.borrow()[func_name]
         } else {
             get_fn(self, instance)
         };
@@ -485,7 +485,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let symbol_name = tcx.symbol_name(instance).name;
         let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
         self.linkage.set(FunctionType::Extern);
-        let func = self.declare_fn(symbol_name, &fn_abi);
+        let func = self.declare_fn(symbol_name, fn_abi);
         let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
         func
     }
@@ -505,7 +505,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     }
 
     fn sess(&self) -> &Session {
-        &self.tcx.sess
+        self.tcx.sess
     }
 
     fn check_overflow(&self) -> bool {
@@ -612,7 +612,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
         // user defined names
         let mut name = String::with_capacity(prefix.len() + 6);
         name.push_str(prefix);
-        name.push_str(".");
+        name.push('.');
         base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
         name
     }
@@ -225,7 +225,7 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             file_end_pos: BytePos(0),
         };
         let mut fn_debug_context = FunctionDebugContext {
-            scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes.as_slice()),
+            scopes: IndexVec::from_elem(empty_scope, mir.source_scopes.as_slice()),
             inlined_function_scopes: Default::default(),
         };
 
@@ -35,7 +35,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
     pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
         let name = self.generate_local_symbol_name("global");
-        self.context.new_global(None, GlobalKind::Internal, ty, &name)
+        self.context.new_global(None, GlobalKind::Internal, ty, name)
     }
 
     pub fn declare_global_with_linkage(
@@ -176,16 +176,14 @@ fn declare_raw_fn<'gcc>(
         cx.functions.borrow()[name]
     } else {
         let params: Vec<_> = param_types
-            .into_iter()
+            .iter()
             .enumerate()
-            .map(|(index, param)| {
-                cx.context.new_parameter(None, *param, &format!("param{}", index))
-            }) // TODO(antoyo): set name.
+            .map(|(index, param)| cx.context.new_parameter(None, *param, format!("param{}", index))) // TODO(antoyo): set name.
             .collect();
         #[cfg(not(feature = "master"))]
         let name = mangle_name(name);
         let func =
-            cx.context.new_function(None, cx.linkage.get(), return_type, &params, &name, variadic);
+            cx.context.new_function(None, cx.linkage.get(), return_type, &params, name, variadic);
         cx.functions.borrow_mut().insert(name.to_string(), func);
 
         #[cfg(feature = "master")]
@@ -200,10 +198,10 @@ fn declare_raw_fn<'gcc>(
         // create a wrapper function that calls rust_eh_personality.
 
         let params: Vec<_> = param_types
-            .into_iter()
+            .iter()
            .enumerate()
            .map(|(index, param)| {
-                cx.context.new_parameter(None, *param, &format!("param{}", index))
+                cx.context.new_parameter(None, *param, format!("param{}", index))
            }) // TODO(antoyo): set name.
            .collect();
        let gcc_func = cx.context.new_function(

src/int.rs
@@ -2,8 +2,6 @@
 //! This module exists because some integer types are not supported on some gcc platforms, e.g.
 //! 128-bit integers on 32-bit platforms and thus require to be handled manually.
 
-use std::convert::TryFrom;
-
 use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
@@ -126,7 +124,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
             let shift_value = self.gcc_sub(sixty_four, b);
             // NOTE: cast low to its unsigned type in order to perform a logical right shift.
-            let unsigned_type = native_int_type.to_unsigned(&self.cx);
+            let unsigned_type = native_int_type.to_unsigned(self.cx);
             let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
             let shifted_low = casted_low >> self.context.new_cast(self.location, b, unsigned_type);
             let shifted_low = self.context.new_cast(self.location, shifted_low, native_int_type);
@@ -258,7 +256,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let new_kind = match typ.kind() {
             Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
             Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
-            t @ (Uint(_) | Int(_)) => t.clone(),
+            t @ (Uint(_) | Int(_)) => *t,
             _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
         };
 
@@ -344,7 +342,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             }
         };
 
-        let intrinsic = self.context.get_builtin_function(&name);
+        let intrinsic = self.context.get_builtin_function(name);
         let res = self
             .current_func()
             // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
@@ -454,7 +452,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let native_int_type = a_type.dyncast_array().expect("get element type");
             // NOTE: cast low to its unsigned type in order to perform a comparison correctly (e.g.
             // the sign is only on high).
-            let unsigned_type = native_int_type.to_unsigned(&self.cx);
+            let unsigned_type = native_int_type.to_unsigned(self.cx);
 
             let lhs_low = self.context.new_cast(self.location, self.low(lhs), unsigned_type);
             let rhs_low = self.context.new_cast(self.location, self.low(rhs), unsigned_type);
@@ -589,7 +587,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 | IntPredicate::IntULT
                 | IntPredicate::IntULE => {
                     if !a_type.is_vector() {
-                        let unsigned_type = a_type.to_unsigned(&self.cx);
+                        let unsigned_type = a_type.to_unsigned(self.cx);
                         lhs = self.context.new_cast(self.location, lhs, unsigned_type);
                         rhs = self.context.new_cast(self.location, rhs, unsigned_type);
                     }
@@ -673,7 +671,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
             // NOTE: cast low to its unsigned type in order to perform a logical right shift.
             // TODO(antoyo): adjust this ^ comment.
-            let unsigned_type = native_int_type.to_unsigned(&self.cx);
+            let unsigned_type = native_int_type.to_unsigned(self.cx);
             let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
             let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type);
             let high_low =
@@ -727,7 +725,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
         if self.is_native_int_type_or_bool(typ) {
-            self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
+            self.context.new_rvalue_from_long(typ, int)
         } else {
             // NOTE: set the sign in high.
             self.from_low_high(typ, int, -(int.is_negative() as i64))
@@ -740,8 +738,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             let num = self.context.new_rvalue_from_long(self.u64_type, int as i64);
             self.gcc_int_cast(num, typ)
         } else if self.is_native_int_type_or_bool(typ) {
-            self.context
-                .new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+            self.context.new_rvalue_from_long(typ, int as i64)
         } else {
             self.from_low_high(typ, int as i64, 0)
         }
@@ -15,7 +15,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
     // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
     // arguments here.
     if gcc_func.get_param_count() != args.len() {
-        match &*func_name {
+        match func_name {
            // NOTE: the following intrinsics have a different number of parameters in LLVM and GCC.
            "__builtin_ia32_prold512_mask"
            | "__builtin_ia32_pmuldq512_mask"
@@ -380,7 +380,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
            _ => (),
        }
    } else {
-        match &*func_name {
+        match func_name {
            "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
                let new_args = args.to_vec();
                let arg3_type = gcc_func.get_param_type(2);
@@ -629,14 +629,11 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
 
 #[cfg(feature = "master")]
 pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
-    match name {
-        "llvm.prefetch" => {
-            let gcc_name = "__builtin_prefetch";
-            let func = cx.context.get_builtin_function(gcc_name);
-            cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
-            return func;
-        }
-        _ => (),
-    }
+    if name == "llvm.prefetch" {
+        let gcc_name = "__builtin_prefetch";
+        let func = cx.context.get_builtin_function(gcc_name);
+        cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+        return func;
+    }
 
     let gcc_name = match name {
@@ -91,7 +91,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
         sym::abort => "abort",
         _ => return None,
     };
-    Some(cx.context.get_builtin_function(&gcc_name))
+    Some(cx.context.get_builtin_function(gcc_name))
 }
 
 impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
@@ -699,13 +699,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let count_leading_zeroes =
             // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
             // instead of using is_uint().
-            if arg_type.is_uint(&self.cx) {
+            if arg_type.is_uint(self.cx) {
                "__builtin_clz"
            }
-            else if arg_type.is_ulong(&self.cx) {
+            else if arg_type.is_ulong(self.cx) {
                "__builtin_clzl"
            }
-            else if arg_type.is_ulonglong(&self.cx) {
+            else if arg_type.is_ulonglong(self.cx) {
                "__builtin_clzll"
            }
            else if width == 128 {
@@ -780,17 +780,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let (count_trailing_zeroes, expected_type) =
             // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
             // instead of using is_uint().
-            if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+            if arg_type.is_uchar(self.cx) || arg_type.is_ushort(self.cx) || arg_type.is_uint(self.cx) {
                // NOTE: we don't need to & 0xFF for uchar because the result is undefined on zero.
                ("__builtin_ctz", self.cx.uint_type)
            }
-            else if arg_type.is_ulong(&self.cx) {
+            else if arg_type.is_ulong(self.cx) {
                ("__builtin_ctzl", self.cx.ulong_type)
            }
-            else if arg_type.is_ulonglong(&self.cx) {
+            else if arg_type.is_ulonglong(self.cx) {
                ("__builtin_ctzll", self.cx.ulonglong_type)
            }
-            else if arg_type.is_u128(&self.cx) {
+            else if arg_type.is_u128(self.cx) {
                // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
                let array_type = self.context.new_array_type(None, arg_type, 3);
                let result = self.current_func()
@@ -872,7 +872,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         // only break apart 128-bit ints if they're not natively supported
         // TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
-        if value_type.is_u128(&self.cx) && !self.cx.supports_128bit_integers {
+        if value_type.is_u128(self.cx) && !self.cx.supports_128bit_integers {
             let sixty_four = self.gcc_int(value_type, 64);
             let right_shift = self.gcc_lshr(value, sixty_four);
             let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
@@ -995,7 +995,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         // Return `result_type`'s maximum or minimum value on overflow
         // NOTE: convert the type to unsigned to have an unsigned shift.
-        let unsigned_type = result_type.to_unsigned(&self.cx);
+        let unsigned_type = result_type.to_unsigned(self.cx);
         let shifted = self.gcc_lshr(
             self.gcc_int_cast(lhs, unsigned_type),
             self.gcc_int(unsigned_type, width as i64 - 1),
@@ -308,10 +308,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 })
                 .collect();
             return Ok(bx.context.new_rvalue_from_vector(None, v_type, &elems));
-        } else {
-            // avoid the unnecessary truncation as an optimization.
-            return Ok(bx.context.new_bitcast(None, result, v_type));
         }
+        // avoid the unnecessary truncation as an optimization.
+        return Ok(bx.context.new_bitcast(None, result, v_type));
     }
     // since gcc doesn't have vector shuffle methods available in non-patched builds, fallback to
     // component-wise bitreverses if they're not available.
@@ -220,7 +220,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
             // to fn_ptr_backend_type handle the on-stack attribute.
             // TODO(antoyo): find a less hackish way to hande the on-stack attribute.
             ty::FnPtr(sig) => {
-                cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
+                cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
             }
             _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
         };
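Beyond the borrow and clone removals, the from_fn_attrs hunk near the top also swaps a manual starts_with('+') check plus [1..] slice for str::strip_prefix, which tests for the prefix and removes it in a single step. A minimal standalone sketch of that idiom (the function name and inputs are invented for illustration, not taken from the patch):

    fn normalize_feature(feature: &str) -> String {
        // Before (sketch): `if feature.starts_with('+') { feature[1..].to_string() } ...`,
        // where the `[1..]` index has to stay in sync with the prefix length.
        // After: strip_prefix returns Some(rest) only when the prefix matches.
        if let Some(stripped) = feature.strip_prefix('+') {
            stripped.to_string()
        } else {
            feature.to_string()
        }
    }

    fn main() {
        assert_eq!(normalize_feature("+sse2"), "sse2");
        assert_eq!(normalize_feature("neon"), "neon");
        println!("ok");
    }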