fix(fmt/style): Further apply Clippy suggestions manually
1. Fix `pattern_type_mismatch` warnings by adding derefs.
2. Move the commented-out `else if` into the previous block in `intrinsic.rs`.
parent 6f76488b2f
commit 9ea3c19055
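Most of the hunks below replace `match x.kind()` with `match *x.kind()` so the scrutinee and the patterns have the same type. A minimal standalone sketch of the idea (toy enum, not rustc's `TyKind`), showing what `clippy::pattern_type_mismatch` objects to and how a deref on the scrutinee addresses it:

// Sketch only (toy types, not rustc code): the lint fires when a reference is
// matched against non-reference patterns via the default binding modes.
// Dereferencing the scrutinee keeps both sides at the same type.
#[derive(Clone, Copy)]
enum Kind {
    Int(u32),
    Float(u32),
}

fn describe(kind: &Kind) -> &'static str {
    // Before: `match kind { Kind::Int(_) => ... }` matches a `&Kind` against
    // `Kind` patterns; the lint asks for an explicit deref instead.
    match *kind {
        Kind::Int(_) => "int",
        Kind::Float(_) => "float",
    }
}

fn main() {
    assert_eq!(describe(&Kind::Int(32)), "int");
    assert_eq!(describe(&Kind::Float(64)), "float");
}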
@@ -91,7 +91,7 @@ fn compute_mir_scopes<'gcc, 'tcx>(
 /// FIXME(tempdragon/?): Add Scope Support Here.
 fn make_mir_scope<'gcc, 'tcx>(
     cx: &CodegenCx<'gcc, 'tcx>,
-    instance: Instance<'tcx>,
+    _instance: Instance<'tcx>,
     mir: &Body<'tcx>,
     variables: &Option<BitSet<SourceScope>>,
     debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
@@ -104,7 +104,7 @@ fn make_mir_scope<'gcc, 'tcx>(
 
     let scope_data = &mir.source_scopes[scope];
     let parent_scope = if let Some(parent) = scope_data.parent_scope {
-        make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
+        make_mir_scope(cx, _instance, mir, variables, debug_context, instantiated, parent);
         debug_context.scopes[parent]
     } else {
         // The root is the function itself.
@@ -118,7 +118,7 @@ fn make_mir_scope<'gcc, 'tcx>(
         return;
     };
 
-    if let Some(vars) = variables {
+    if let Some(ref vars) = *variables {
        if !vars.contains(scope) && scope_data.inlined.is_none() {
            // Do not create a DIScope if there are no variables defined in this
            // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
@@ -136,8 +136,13 @@ fn make_mir_scope<'gcc, 'tcx>(
     let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
         // FIXME(eddyb) this doesn't account for the macro-related
         // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
-        let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
-        cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+
+        // NOTE: These variables passed () here.
+        // Changed to comply to clippy.
+
+        /* let callsite_scope = */
+        parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+        cx.dbg_loc(/* callsite_scope */ (), parent_scope.inlined_at, callsite_span)
     });
     let p_inlined_at = parent_scope.inlined_at;
     // TODO(tempdragon): dbg_scope: Add support for scope extension here.
@@ -275,15 +280,15 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let pos = span.lo();
         let DebugLoc { file, line, col } = self.lookup_debug_loc(pos);
         let loc = match &file.name {
-            rustc_span::FileName::Real(name) => match name {
-                rustc_span::RealFileName::LocalPath(name) => {
+            rustc_span::FileName::Real(ref name) => match &name {
+                rustc_span::RealFileName::LocalPath(ref name) => {
                     if let Some(name) = name.to_str() {
                         self.context.new_location(name, line as i32, col as i32)
                     } else {
                         Location::null()
                     }
                 }
-                rustc_span::RealFileName::Remapped { local_path, virtual_name: _ } => {
+                rustc_span::RealFileName::Remapped { ref local_path, virtual_name: _unused } => {
                     if let Some(name) = local_path.as_ref() {
                         if let Some(name) = name.to_str() {
                             self.context.new_location(name, line as i32, col as i32)
@@ -253,10 +253,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     ) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
         use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
 
-        let new_kind = match typ.kind() {
+        let new_kind = match *typ.kind() {
             Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
             Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
-            t @ (Uint(_) | Int(_)) => *t,
+            t @ (Uint(_) | Int(_)) => t,
             _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
         };
 
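The second change in the hunk above follows from the first: once the scrutinee is `*typ.kind()`, an `x @ pattern` binding already captures a value, so the arm returns `t` rather than `*t`. A small sketch of the same effect with a toy type (the pointer-width mapping is made up for the example):

// Sketch only (toy types, not rustc code): with a dereferenced scrutinee, an
// `x @ pattern` binding captures the value itself, so no extra deref is
// needed in the arm body.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Width {
    W32,
    W64,
    Ptr,
}

fn normalize(width: &Width) -> Width {
    match *width {
        // Assume a 64-bit pointer width for this toy example.
        Width::Ptr => Width::W64,
        // `w` is already a `Width` value here; before the deref it would have
        // been a `&Width` and the arm would have needed `*w`.
        w @ (Width::W32 | Width::W64) => w,
    }
}

fn main() {
    assert_eq!(normalize(&Width::Ptr), Width::W64);
    assert_eq!(normalize(&Width::W32), Width::W32);
}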
@@ -166,7 +166,9 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
            sym::volatile_load | sym::unaligned_volatile_load => {
                let tp_ty = fn_args.type_at(0);
                let ptr = args[0].immediate();
-               let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
+               // The reference was changed to clone to comply to clippy.
+               let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = fn_abi.ret.mode.clone()
+               {
                    let gcc_ty = ty.gcc_type(self);
                    self.volatile_load(gcc_ty, ptr)
                } else {
@@ -308,17 +310,18 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                let b = args[1].immediate();
                if layout.size().bytes() == 0 {
                    self.const_bool(true)
-               }
-               /*else if use_integer_compare {
-                   let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
-                   let ptr_ty = self.type_ptr_to(integer_ty);
-                   let a_ptr = self.bitcast(a, ptr_ty);
-                   let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
-                   let b_ptr = self.bitcast(b, ptr_ty);
-                   let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
-                   self.icmp(IntPredicate::IntEQ, a_val, b_val)
-               }*/
-               else {
+                   // The else if an immediate neighbor of this block.
+                   // It is moved here to comply to Clippy.
+                   /*else if use_integer_compare {
+                       let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+                       let ptr_ty = self.type_ptr_to(integer_ty);
+                       let a_ptr = self.bitcast(a, ptr_ty);
+                       let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+                       let b_ptr = self.bitcast(b, ptr_ty);
+                       let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+                       self.icmp(IntPredicate::IntEQ, a_val, b_val)
+                   }*/
+               } else {
                    let void_ptr_type = self.context.new_type::<*const ()>();
                    let a_ptr = self.bitcast(a, void_ptr_type);
                    let b_ptr = self.bitcast(b, void_ptr_type);
@@ -385,7 +388,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
        };
 
        if !fn_abi.ret.is_ignore() {
-           if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
+           // The reference was changed to clone to comply to clippy.
+           if let PassMode::Cast { cast: ty, .. } = fn_abi.ret.mode.clone() {
                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
@@ -586,7 +590,7 @@ fn int_type_width_signed<'gcc, 'tcx>(
    ty: Ty<'tcx>,
    cx: &CodegenCx<'gcc, 'tcx>,
 ) -> Option<(u64, bool)> {
-   match ty.kind() {
+   match *ty.kind() {
        ty::Int(t) => Some((
            match t {
                rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
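The two `PassMode::Cast` hunks above switch from matching on `&fn_abi.ret.mode` to matching on a clone of the mode, as the in-diff comments note: cloning gives the `if let` an owned value to destructure, so the pattern and scrutinee types match. A minimal sketch of that pattern with a hypothetical enum (not the real `PassMode` API):

// Sketch only (hypothetical types): clone the field so the pattern binds
// owned data instead of references, satisfying clippy::pattern_type_mismatch
// while leaving the original value untouched.
#[derive(Clone)]
enum Mode {
    Direct,
    Cast { bits: u32 },
}

fn describe(mode: &Mode) -> String {
    if let Mode::Cast { bits } = mode.clone() {
        format!("cast to {bits} bits")
    } else {
        "direct".to_string()
    }
}

fn main() {
    assert_eq!(describe(&Mode::Cast { bits: 128 }), "cast to 128 bits");
    assert_eq!(describe(&Mode::Direct), "direct");
}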
@@ -71,11 +71,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
        let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
 
        let mask_ty = arg_tys[0];
-       let mut mask = match mask_ty.kind() {
+       let mut mask = match *mask_ty.kind() {
            ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
            ty::Array(elem, len)
-               if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+               if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
                        == Some(expected_bytes) =>
            {
@@ -353,8 +353,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
    if name == sym::simd_shuffle {
        // Make sure this is actually an array, since typeck only checks the length-suffixed
        // version of this intrinsic.
-       let n: u64 = match args[2].layout.ty.kind() {
-           ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+       let n: u64 = match *args[2].layout.ty.kind() {
+           ty::Array(ty, len) if matches!(*ty.kind(), ty::Uint(ty::UintTy::U32)) => {
                len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(
                    || span_bug!(span, "could not evaluate shuffle index array length"),
                )
@@ -427,7 +427,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
        m_len == v_len,
        InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
    );
-   match m_elem_ty.kind() {
+   match *m_elem_ty.kind() {
        ty::Int(_) => {}
        _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
    }
@@ -460,13 +460,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
        Unsupported,
    }
 
-   let in_style = match in_elem.kind() {
+   let in_style = match *in_elem.kind() {
        ty::Int(_) | ty::Uint(_) => Style::Int,
        ty::Float(_) => Style::Float,
        _ => Style::Unsupported,
    };
 
-   let out_style = match out_elem.kind() {
+   let out_style = match *out_elem.kind() {
        ty::Int(_) | ty::Uint(_) => Style::Int,
        ty::Float(_) => Style::Float,
        _ => Style::Unsupported,
@@ -493,7 +493,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
    macro_rules! arith_binary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
-               match in_elem.kind() {
+               match *in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
@@ -543,13 +543,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
            shift += 1;
        }
 
-       match ret_ty.kind() {
+       match *ret_ty.kind() {
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
                // Zero-extend iN to the bitmask type:
                return Ok(result);
            }
            ty::Array(elem, len)
-               if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+               if matches!(*elem.kind(), ty::Uint(ty::UintTy::U8))
                    && len.try_eval_target_usize(bx.tcx, ty::ParamEnv::reveal_all())
                        == Some(expected_bytes) =>
            {
@@ -588,7 +588,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
            return Err(());
        }};
    }
-   let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
+   let (elem_ty_str, elem_ty) = if let ty::Float(ref f) = *in_elem.kind() {
        let elem_ty = bx.cx.type_float_from_ty(*f);
        match f.bit_width() {
            32 => ("f", elem_ty),
@@ -795,7 +795,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
-           match t.kind() {
+           match *t.kind() {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
@@ -803,7 +803,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-           match t.kind() {
+           match *t.kind() {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
@@ -813,7 +813,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
        // to the element type of the first argument
        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-       let (pointer_count, underlying_ty) = match element_ty1.kind() {
+       let (pointer_count, underlying_ty) = match *element_ty1.kind() {
            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
            _ => {
                require!(
@@ -837,7 +837,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
        // The element type of the third argument must be a signed integer type of any width:
        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-       match element_ty2.kind() {
+       match *element_ty2.kind() {
            ty::Int(_) => (),
            _ => {
                require!(
@@ -909,7 +909,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
-           match t.kind() {
+           match *t.kind() {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
@@ -917,7 +917,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-           match t.kind() {
+           match *t.kind() {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
@@ -928,7 +928,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
        let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
        let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
        let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-       let (pointer_count, underlying_ty) = match element_ty1.kind() {
+       let (pointer_count, underlying_ty) = match *element_ty1.kind() {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
                (ptr_count(element_ty1), non_ptr(element_ty1))
            }
@@ -953,7 +953,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
        assert_eq!(underlying_ty, non_ptr(element_ty0));
 
        // The element type of the third argument must be a signed integer type of any width:
-       match element_ty2.kind() {
+       match *element_ty2.kind() {
            ty::Int(_) => (),
            _ => {
                require!(
@@ -1011,7 +1011,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
    macro_rules! arith_unary {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == sym::$name {
-               match in_elem.kind() {
+               match *in_elem.kind() {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate()))
                    })*
@@ -1135,7 +1135,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                ret_ty == in_elem,
                InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
            );
-           return match in_elem.kind() {
+           return match *in_elem.kind() {
                ty::Int(_) | ty::Uint(_) => {
                    let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
                    if $ordered {
@@ -1204,7 +1204,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                ret_ty == in_elem,
                InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
            );
-           return match in_elem.kind() {
+           return match *in_elem.kind() {
                ty::Int(_) | ty::Uint(_) => Ok(bx.$int_red(args[0].immediate())),
                ty::Float(_) => Ok(bx.$float_red(args[0].immediate())),
                _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
@@ -1233,7 +1233,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                );
                args[0].immediate()
            } else {
-               match in_elem.kind() {
+               match *in_elem.kind() {
                    ty::Int(_) | ty::Uint(_) => {}
                    _ => return_error!(InvalidMonomorphization::UnsupportedSymbol {
                        span,
@@ -1247,7 +1247,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
                args[0].immediate()
            };
-           return match in_elem.kind() {
+           return match *in_elem.kind() {
                ty::Int(_) | ty::Uint(_) => {
                    let r = bx.vector_reduce_op(input, $op);
                    Ok(if !$boolean {
@@ -90,7 +90,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(
        Abi::Uninhabited | Abi::Aggregate { .. } => {}
    }
 
-   let name = match layout.ty.kind() {
+   let name = match *layout.ty.kind() {
        // FIXME(eddyb) producing readable type names for trait objects can result
        // in problematically distinct types due to HRTB and subtyping (see #47638).
        // ty::Dynamic(..) |