Merge pull request #1058 from bjorn3/misc_rustc_test_suite_fixes
Misc rustc test suite fixes
This commit is contained in:
commit 05fc1f4add
@@ -69,7 +69,7 @@ function jit_calc() {
 to make it possible to use incremental mode for all analyses performed by rustc without caching
 object files when their content should have been changed by a change to cg_clif.</dd>
 <dt>CG_CLIF_DISPLAY_CG_TIME</dt>
-<dd>Display the time it took to perform codegen for a crate</dd>
+<dd>If "1", display the time it took to perform codegen for a crate</dd>
 </dl>
 
 ## Not yet supported
@@ -103,6 +103,14 @@ fn main() {
     Box::pin(move |mut _task_context| {
         yield ();
     }).as_mut().resume(0);
+
+    #[derive(Copy, Clone)]
+    enum Nums {
+        NegOne = -1,
+    }
+
+    let kind = Nums::NegOne;
+    assert_eq!(-1i128, kind as i128);
 }
 
 #[target_feature(enable = "sse2")]
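Note: the added lines test that a fieldless enum with a negative discriminant survives a cast to a 128-bit integer. A standalone sketch of the behaviour being exercised (the final `i8` assertion is extra illustration, not part of the diff):

```rust
#[derive(Copy, Clone)]
enum Nums {
    NegOne = -1,
}

fn main() {
    let kind = Nums::NegOne;
    // The discriminant must be sign-extended when cast to a wider integer type.
    assert_eq!(-1i128, kind as i128);
    // It must also survive a cast to a narrower type.
    assert_eq!(-1i8, kind as i8);
}
```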
@@ -689,6 +689,9 @@ fn trans_stmt<'tcx>(
             asm_str_style: _,
         } = asm;
         match &*asm_code.as_str() {
+            "" => {
+                // Black box
+            }
             cpuid if cpuid.contains("cpuid") => {
                 crate::trap::trap_unimplemented(
                     fx,
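Note: an empty template string is matched and simply ignored. At the time, libcore's `black_box` hint was implemented with an empty inline-asm template, which is presumably the "black box" the comment refers to. A rough modern-syntax sketch of such a no-op asm statement (using the stable `asm!` macro rather than the `llvm_asm!` form the arm above handles):

```rust
fn main() {
    let x = 2 + 2;
    // An empty inline-asm template contains no instructions; it only acts as
    // an optimization barrier, so a backend may lower it to nothing.
    unsafe { std::arch::asm!("", options(nomem, nostack, preserves_flags)) };
    assert_eq!(x, 4);
}
```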
@@ -109,7 +109,7 @@ fn trans_mono_item<'tcx, B: Backend + 'static>(
 }
 
 fn time<R>(tcx: TyCtxt<'_>, name: &'static str, f: impl FnOnce() -> R) -> R {
-    if std::env::var("CG_CLIF_DISPLAY_CG_TIME").is_ok() {
+    if std::env::var("CG_CLIF_DISPLAY_CG_TIME").as_ref().map(|val| &**val) == Ok("1") {
         println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
         let before = std::time::Instant::now();
         let res = tcx.sess.time(name, f);
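Note: with the old `is_ok()` check, setting `CG_CLIF_DISPLAY_CG_TIME` to any value (even "0") enabled the timing output; the new check requires the exact value "1", matching the README wording above. A standalone sketch of the difference:

```rust
use std::env::VarError;

// The new check from the diff: only the exact value "1" counts.
fn display_cg_time(var: Result<String, VarError>) -> bool {
    var.as_ref().map(|val| &**val) == Ok("1")
}

fn main() {
    // Old check (`is_ok()`): any present value, even "0", enabled timing.
    assert!(Ok::<_, VarError>("0".to_string()).is_ok());

    // New check: "0" and an unset variable are off, "1" is on.
    assert!(!display_cg_time(Ok("0".to_string())));
    assert!(!display_cg_time(Err(VarError::NotPresent)));
    assert!(display_cg_time(Ok("1".to_string())));
}
```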
@@ -138,6 +138,27 @@ macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $
     crate::atomic_shim::unlock_global_lock($fx);
 }
 
+macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    match $ty.kind {
+        ty::Uint(_) | ty::Int(_) => {}
+        _ => {
+            $fx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
+            // Prevent verifier error
+            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+            return;
+        }
+    }
+}
+
+macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
+    if !$ty.is_simd() {
+        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+        // Prevent verifier error
+        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
+        return;
+    }
+}
+
 fn lane_type_and_count<'tcx>(
     tcx: TyCtxt<'tcx>,
     layout: TyAndLayout<'tcx>,
@@ -817,7 +838,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             dest.write_cvalue(fx, val);
         };
 
-        size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name, () {
+        size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
             let const_val =
                 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
             let val = crate::constant::trans_const_value(
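Note: `variant_count` joins the intrinsics whose value is produced entirely by `const_eval_instance`, just like `size_of`. On nightly it is surfaced as `std::mem::variant_count`; a small sketch (the feature gate is an assumption about the nightly API, not part of the diff):

```rust
#![feature(variant_count)] // nightly-only

fn main() {
    // The count is computed at compile time, which is why the backend can
    // handle it in the same const-eval arm as size_of and friends.
    assert_eq!(std::mem::variant_count::<Option<u8>>(), 2);
    assert_eq!(std::mem::variant_count::<std::cmp::Ordering>(), 3);
}
```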
@@ -866,12 +887,15 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
 
             let inner_layout =
                 fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+            validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
 
             crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
+
             crate::atomic_shim::lock_global_lock(fx);
 
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
@@ -880,6 +904,8 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             crate::atomic_shim::unlock_global_lock(fx);
         };
         _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, T);
+
             crate::atomic_shim::lock_global_lock(fx);
 
             // Read old
@@ -893,7 +919,12 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
 
             crate::atomic_shim::unlock_global_lock(fx);
         };
-        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+            validate_atomic_type!(fx, intrinsic, span, T);
+
+            let test_old = test_old.load_scalar(fx);
+            let new = new.load_scalar(fx);
+
             crate::atomic_shim::lock_global_lock(fx);
 
             // Read old
@@ -913,16 +944,26 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
             crate::atomic_shim::unlock_global_lock(fx);
         };
 
-        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
+        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let amount = amount.load_scalar(fx);
             atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
+        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let amount = amount.load_scalar(fx);
             atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, T);
+
+            let src = src.load_scalar(fx);
+
             crate::atomic_shim::lock_global_lock(fx);
 
             let clif_ty = fx.clif_type(T).unwrap();
@@ -934,23 +975,35 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
 
             crate::atomic_shim::unlock_global_lock(fx);
         };
-        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
         };
 
-        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
         };
-        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
+        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
+            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
+            let src = src.load_scalar(fx);
             atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
         };
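Note: switching these operands from `v` (pre-loaded scalar) to `c` (`CValue`) lets `validate_atomic_type!` inspect the operand's type before it is loaded with `load_scalar`. These intrinsic families back the stable `std::sync::atomic` API; a small sketch of the operations involved (the method-to-intrinsic mapping is the usual one, stated as an assumption rather than taken from the diff):

```rust
use std::sync::atomic::{AtomicI32, Ordering};

fn main() {
    let a = AtomicI32::new(5);
    // fetch_add lowers to the atomic_xadd family and returns the old value.
    assert_eq!(a.fetch_add(3, Ordering::SeqCst), 5);
    // compare_exchange lowers to atomic_cxchg; Ok(old) means the swap happened.
    assert_eq!(a.compare_exchange(8, -1, Ordering::SeqCst, Ordering::SeqCst), Ok(8));
    // fetch_max / fetch_min lower to the atomic_max / atomic_min families.
    assert_eq!(a.fetch_max(10, Ordering::SeqCst), -1);
    assert_eq!(a.load(Ordering::SeqCst), 10);
}
```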
@@ -21,6 +21,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_cast, (c a) {
+            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
             simd_for_each_lane(fx, a, ret, |fx, lane_layout, ret_lane_layout, lane| {
                 let ret_lane_ty = fx.clif_type(ret_lane_layout.ty).unwrap();
@@ -33,26 +34,34 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_eq, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, Equal(x, y) -> ret);
         };
         simd_ne, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, NotEqual(x, y) -> ret);
         };
         simd_lt, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedLessThan|SignedLessThan(x, y) -> ret);
         };
         simd_le, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
         };
         simd_gt, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
         };
         simd_ge, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_cmp!(fx, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
         };
 
         // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
         _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, o idx) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
+
             let n: u16 = intrinsic["simd_shuffle".len()..].parse().unwrap();
 
             assert_eq!(x.layout(), y.layout());
@@ -105,6 +114,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_insert, (c base, o idx, v _val) {
+            // FIXME validate
             let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
                 idx_const
             } else {
@@ -132,6 +142,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_extract, (c v, o idx) {
+            validate_simd_type!(fx, intrinsic, span, v.layout().ty);
             let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
                 idx_const
             } else {
@@ -155,34 +166,44 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_add, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_flt_binop!(fx, iadd|fadd(x, y) -> ret);
         };
         simd_sub, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_flt_binop!(fx, isub|fsub(x, y) -> ret);
         };
         simd_mul, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_flt_binop!(fx, imul|fmul(x, y) -> ret);
         };
         simd_div, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_flt_binop!(fx, udiv|sdiv|fdiv(x, y) -> ret);
         };
         simd_shl, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_binop!(fx, ishl(x, y) -> ret);
         };
         simd_shr, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_binop!(fx, ushr|sshr(x, y) -> ret);
         };
         simd_and, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_binop!(fx, band(x, y) -> ret);
         };
         simd_or, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_binop!(fx, bor(x, y) -> ret);
         };
         simd_xor, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_int_binop!(fx, bxor(x, y) -> ret);
         };
 
         simd_fma, (c a, c b, c c) {
+            validate_simd_type!(fx, intrinsic, span, a.layout().ty);
             assert_eq!(a.layout(), b.layout());
             assert_eq!(a.layout(), c.layout());
             let layout = a.layout();
@@ -205,9 +226,11 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
         };
 
         simd_fmin, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_flt_binop!(fx, fmin(x, y) -> ret);
         };
         simd_fmax, (c x, c y) {
+            validate_simd_type!(fx, intrinsic, span, x.layout().ty);
             simd_flt_binop!(fx, fmax(x, y) -> ret);
         };
     }
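Note: the `simd_*` generic intrinsics validated above are what stdarch's vendor intrinsics are built on (for example, `_mm_add_epi32` is implemented in terms of `simd_add`), so the new checks turn bad monomorphizations into clean errors instead of Cranelift verifier failures. A small sketch, gated to x86_64:

```rust
#[cfg(target_arch = "x86_64")]
fn main() {
    use std::arch::x86_64::*;
    // SAFETY: SSE2 is part of the x86_64 baseline.
    unsafe {
        let a = _mm_set1_epi32(7);
        let b = _mm_set1_epi32(35);
        // In stdarch, _mm_add_epi32 is implemented on top of the generic
        // simd_add intrinsic handled by the arms above.
        let sum = _mm_add_epi32(a, b);
        let mut out = [0i32; 4];
        _mm_storeu_si128(out.as_mut_ptr() as *mut __m128i, sum);
        assert_eq!(out, [42; 4]);
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```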
@@ -234,6 +234,8 @@ impl CodegenBackend for CraneliftCodegenBackend {
     ) -> Result<(), ErrorReported> {
         use rustc_codegen_ssa::back::link::link_binary;
 
+        sess.abort_if_errors();
+
         let codegen_results = *res
             .downcast::<CodegenResults>()
             .expect("Expected CraneliftCodegenBackend's CodegenResult, found Box<Any>");
@@ -406,6 +406,10 @@ impl<'tcx> CPlace<'tcx> {
         to_ty: Ty<'tcx>,
     ) {
         match (&from_ty.kind, &to_ty.kind) {
            (ty::Ref(_, a, _), ty::Ref(_, b, _))
            | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _}), ty::RawPtr(TypeAndMut { ty: b, mutbl: _})) => {
                assert_assignable(fx, a, b);
            }
+            (ty::FnPtr(_), ty::FnPtr(_)) => {
+                let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+                    ParamEnv::reveal_all(),
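Note: the new `FnPtr` arm compares the two signatures after `normalize_erasing_late_bound_regions`, so fn pointers that differ only in late-bound lifetimes count as assignable. A minimal surface-level sketch of the kind of assignment this is meant to accept (my reading of the motivation, not stated in the diff):

```rust
fn takes_any(_: &str) {}

fn main() {
    // `for<'a> fn(&'a str)` is a subtype of `fn(&'static str)`; once
    // late-bound regions are erased, both sides have the same signature.
    let f: fn(&'static str) = takes_any;
    f("hello");
}
```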