Auto merge of #132079 - fmease:rollup-agrd358, r=fmease

Rollup of 9 pull requests. Successful merges:

- #130991 (Vectorized SliceContains)
- #131928 (rustdoc: Document `markdown` module.)
- #131955 (Set `signext` or `zeroext` for integer arguments on RISC-V and LoongArch64)
- #131979 (Minor tweaks to `compare_impl_item.rs`)
- #132036 (Add a test case for #131164)
- #132039 (Specialize `read_exact` and `read_buf_exact` for `VecDeque`)
- #132060 ("innermost", "outermost", "leftmost", and "rightmost" don't need hyphens)
- #132065 (Clarify documentation of `ptr::dangling()` function)
- #132066 (Fix a typo in documentation of `pointer::sub_ptr()`)

r? `@ghost`
`@rustbot` modify labels: rollup

commit b8bb2968ce
@@ -197,7 +197,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
/// If the type is an unsized struct, the regular layout is generated,
-/// with the inner-most trailing unsized field using the "minimal unit"
+/// with the innermost trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {

@@ -191,7 +191,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
/// If the type is an unsized struct, the regular layout is generated,
-/// with the inner-most trailing unsized field using the "minimal unit"
+/// with the innermost trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
@@ -888,7 +888,7 @@ impl CrateInfo {
// below.
//
// In order to get this left-to-right dependency ordering, we use the reverse
-// postorder of all crates putting the leaves at the right-most positions.
+// postorder of all crates putting the leaves at the rightmost positions.
let mut compiler_builtins = None;
let mut used_crates: Vec<_> = tcx
.postorder_cnums(())
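The ordering that comment describes can be seen with a toy dependency graph (the names below are made up, not rustc's crate graph): a depth-first postorder lists dependencies before their dependers, so reversing it puts dependers first and the leaves rightmost.

    use std::collections::HashMap;

    fn postorder(
        node: &'static str,
        deps: &HashMap<&'static str, Vec<&'static str>>,
        out: &mut Vec<&'static str>,
    ) {
        if out.contains(&node) {
            return; // already visited
        }
        for &dep in deps.get(node).into_iter().flatten() {
            postorder(dep, deps, out);
        }
        out.push(node);
    }

    fn main() {
        // `app` depends on `lib`, which depends on `std`.
        let deps = HashMap::from([("app", vec!["lib"]), ("lib", vec!["std"]), ("std", vec![])]);
        let mut order = Vec::new();
        postorder("app", &deps, &mut order);
        order.reverse(); // reverse postorder: leaves such as `std` end up rightmost
        assert_eq!(order, ["app", "lib", "std"]);
    }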
@@ -58,9 +58,9 @@ impl HumanReadableErrorType {
struct Margin {
/// The available whitespace in the left that can be consumed when centering.
pub whitespace_left: usize,
-/// The column of the beginning of left-most span.
+/// The column of the beginning of leftmost span.
pub span_left: usize,
-/// The column of the end of right-most span.
+/// The column of the end of rightmost span.
pub span_right: usize,
/// The beginning of the line to be displayed.
pub computed_left: usize,

@@ -128,7 +128,7 @@ impl Margin {
} else {
0
};
-// We want to show as much as possible, max_line_len is the right-most boundary for the
+// We want to show as much as possible, max_line_len is the rightmost boundary for the
// relevant code.
self.computed_right = max(max_line_len, self.computed_left);

@@ -685,7 +685,7 @@ impl HumanEmitter {
buffer.puts(line_offset, code_offset, "...", Style::LineNumber);
}
if margin.was_cut_right(line_len) {
-// We have stripped some code after the right-most span end, make it clear we did so.
+// We have stripped some code after the rightmost span end, make it clear we did so.
buffer.puts(line_offset, code_offset + taken - 3, "...", Style::LineNumber);
}
buffer.puts(line_offset, 0, &self.maybe_anonymized(line_index), Style::LineNumber);
@@ -25,7 +25,7 @@ expand_collapse_debuginfo_illegal =
illegal value for attribute #[collapse_debuginfo(no|external|yes)]

expand_count_repetition_misplaced =
-`count` can not be placed inside the inner-most repetition
+`count` can not be placed inside the innermost repetition

expand_crate_name_in_cfg_attr =
`crate_name` within an `#![cfg_attr]` attribute is forbidden
@@ -23,11 +23,11 @@ pub(crate) enum MetaVarExpr {
/// Ignore a meta-variable for repetition without expansion.
Ignore(Ident),

-/// The index of the repetition at a particular depth, where 0 is the inner-most
+/// The index of the repetition at a particular depth, where 0 is the innermost
/// repetition. The `usize` is the depth.
Index(usize),

-/// The length of the repetition at a particular depth, where 0 is the inner-most
+/// The length of the repetition at a particular depth, where 0 is the innermost
/// repetition. The `usize` is the depth.
Len(usize),
}
@@ -570,7 +570,7 @@ fn lockstep_iter_size(
}
}

-/// Used solely by the `count` meta-variable expression, counts the outer-most repetitions at a
+/// Used solely by the `count` meta-variable expression, counts the outermost repetitions at a
/// given optional nested depth.
///
/// For example, a macro parameter of `$( { $( $foo:ident ),* } )*` called with `{ a, b } { c }`:
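For readers unfamiliar with these expressions, a nightly-only sketch follows; the `macro_metavar_expr` feature name and the `${count($x)}` syntax are assumed to match the unstable feature at the time of writing.

    #![feature(macro_metavar_expr)]

    macro_rules! total_items {
        ($( [ $($x:expr),* ] )*) => {
            // `count` sits inside the outer repetition, not the innermost one:
            // for each outer group it yields how many `$x` that group contains.
            0 $( + ${count($x)} )*
        };
    }

    fn main() {
        assert_eq!(total_items!([1, 2] [3, 4, 5]), 5);
    }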
@@ -43,14 +43,13 @@ mod refine;
/// - `impl_m`: type of the method we are checking
/// - `trait_m`: the method in the trait
/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation
#[instrument(level = "debug", skip(tcx))]
pub(super) fn compare_impl_method<'tcx>(
tcx: TyCtxt<'tcx>,
impl_m: ty::AssocItem,
trait_m: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) {
-debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref);

let _: Result<_, ErrorGuaranteed> = try {
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, false)?;
compare_method_predicate_entailment(tcx, impl_m, trait_m, impl_trait_ref)?;

@@ -167,8 +166,6 @@ fn compare_method_predicate_entailment<'tcx>(
trait_m: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
-let trait_to_impl_args = impl_trait_ref.args;
-
// This node-id should be used for the `body_id` field on each
// `ObligationCause` (and the `FnCtxt`).
//
@@ -183,27 +180,17 @@ fn compare_method_predicate_entailment<'tcx>(
kind: impl_m.kind,
});

-// Create mapping from impl to placeholder.
-let impl_to_placeholder_args = GenericArgs::identity_for_item(tcx, impl_m.def_id);

-// Create mapping from trait to placeholder.
-let trait_to_placeholder_args =
-impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
-debug!("compare_impl_method: trait_to_placeholder_args={:?}", trait_to_placeholder_args);
+// Create mapping from trait method to impl method.
+let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_m.def_id).rebase_onto(
+tcx,
+impl_m.container_id(tcx),
+impl_trait_ref.args,
+);
+debug!(?trait_to_impl_args);

let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
let trait_m_predicates = tcx.predicates_of(trait_m.def_id);

-// Create obligations for each predicate declared by the impl
-// definition in the context of the trait's parameter
-// environment. We can't just use `impl_env.caller_bounds`,
-// however, because we want to replace all late-bound regions with
-// region variables.
-let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
-let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);

-debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);

// This is the only tricky bit of the new way we check implementation methods
// We need to build a set of predicates where only the method-level bounds
// are from the trait and we assume all other bounds from the implementation
@@ -211,25 +198,25 @@ fn compare_method_predicate_entailment<'tcx>(
//
// We then register the obligations from the impl_m and check to see
// if all constraints hold.
-hybrid_preds.predicates.extend(
-trait_m_predicates
-.instantiate_own(tcx, trait_to_placeholder_args)
-.map(|(predicate, _)| predicate),
+let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
+let mut hybrid_preds = impl_predicates.instantiate_identity(tcx).predicates;
+hybrid_preds.extend(
+trait_m_predicates.instantiate_own(tcx, trait_to_impl_args).map(|(predicate, _)| predicate),
);

// Construct trait parameter environment and then shift it into the placeholder viewpoint.
// The key step here is to update the caller_bounds's predicates to be
// the new hybrid bounds we computed.
let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_def_id);
-let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
+let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+debug!(caller_bounds=?param_env.caller_bounds());

let infcx = &tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new_with_diagnostics(infcx);

-debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());

-let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_args);
+// Create obligations for each predicate declared by the impl
+// definition in the context of the hybrid param-env. This makes
+// sure that the impl's method's where clauses are not more
+// restrictive than the trait's method (and the impl itself).
+let impl_m_own_bounds = impl_m_predicates.instantiate_own_identity();
for (predicate, span) in impl_m_own_bounds {
let normalize_cause = traits::ObligationCause::misc(span, impl_m_def_id);
let predicate = ocx.normalize(&normalize_cause, param_env, predicate);
@@ -256,7 +243,6 @@ fn compare_method_predicate_entailment<'tcx>(
// any associated types appearing in the fn arguments or return
// type.

-// Compute placeholder form of impl and trait method tys.
let mut wf_tys = FxIndexSet::default();

let unnormalized_impl_sig = infcx.instantiate_binder_with_fresh_vars(

@@ -267,9 +253,9 @@ fn compare_method_predicate_entailment<'tcx>(

let norm_cause = ObligationCause::misc(impl_m_span, impl_m_def_id);
let impl_sig = ocx.normalize(&norm_cause, param_env, unnormalized_impl_sig);
-debug!("compare_impl_method: impl_fty={:?}", impl_sig);
+debug!(?impl_sig);

-let trait_sig = tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_placeholder_args);
+let trait_sig = tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_impl_args);
let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);

// Next, add all inputs and output as well-formed tys. Importantly,

@@ -280,9 +266,7 @@ fn compare_method_predicate_entailment<'tcx>(
// We also have to add the normalized trait signature
// as we don't normalize during implied bounds computation.
wf_tys.extend(trait_sig.inputs_and_output.iter());
-let trait_fty = Ty::new_fn_ptr(tcx, ty::Binder::dummy(trait_sig));

-debug!("compare_impl_method: trait_fty={:?}", trait_fty);
+debug!(?trait_sig);

// FIXME: We'd want to keep more accurate spans than "the method signature" when
// processing the comparison between the trait and impl fn, but we sadly lose them
@@ -455,8 +439,6 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
// just so we don't ICE during instantiation later.
check_method_is_structurally_compatible(tcx, impl_m, trait_m, impl_trait_ref, true)?;

-let trait_to_impl_args = impl_trait_ref.args;
-
let impl_m_hir_id = tcx.local_def_id_to_hir_id(impl_m_def_id);
let return_span = tcx.hir().fn_decl_by_hir_id(impl_m_hir_id).unwrap().output.span();
let cause =

@@ -466,18 +448,18 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
kind: impl_m.kind,
});

-// Create mapping from impl to placeholder.
-let impl_to_placeholder_args = GenericArgs::identity_for_item(tcx, impl_m.def_id);

-// Create mapping from trait to placeholder.
-let trait_to_placeholder_args =
-impl_to_placeholder_args.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_args);
+// Create mapping from trait to impl (i.e. impl trait header + impl method identity args).
+let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_m.def_id).rebase_onto(
+tcx,
+impl_m.container_id(tcx),
+impl_trait_ref.args,
+);

let hybrid_preds = tcx
.predicates_of(impl_m.container_id(tcx))
.instantiate_identity(tcx)
.into_iter()
-.chain(tcx.predicates_of(trait_m.def_id).instantiate_own(tcx, trait_to_placeholder_args))
+.chain(tcx.predicates_of(trait_m.def_id).instantiate_own(tcx, trait_to_impl_args))
.map(|(clause, _)| clause);
let param_env = ty::ParamEnv::new(tcx.mk_clauses_from_iter(hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(

@@ -511,7 +493,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
.instantiate_binder_with_fresh_vars(
return_span,
infer::HigherRankedType,
-tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_placeholder_args),
+tcx.fn_sig(trait_m.def_id).instantiate(tcx, trait_to_impl_args),
)
.fold_with(&mut collector);
@@ -705,7 +687,7 @@ pub(super) fn collect_return_position_impl_trait_in_trait_tys<'tcx>(
// Also, we only need to account for a difference in trait and impl args,
// since we previously enforce that the trait method and impl method have the
// same generics.
-let num_trait_args = trait_to_impl_args.len();
+let num_trait_args = impl_trait_ref.args.len();
let num_impl_args = tcx.generics_of(impl_m.container_id(tcx)).own_params.len();
let ty = match ty.try_fold_with(&mut RemapHiddenTyRegions {
tcx,
@@ -1041,12 +1023,7 @@ fn check_region_bounds_on_impl_item<'tcx>(
let trait_generics = tcx.generics_of(trait_m.def_id);
let trait_params = trait_generics.own_counts().lifetimes;

-debug!(
-"check_region_bounds_on_impl_item: \
-trait_generics={:?} \
-impl_generics={:?}",
-trait_generics, impl_generics
-);
+debug!(?trait_generics, ?impl_generics);

// Must have same number of early-bound lifetime parameters.
// Unfortunately, if the user screws up the bounds, then this
@@ -1710,8 +1687,7 @@ pub(super) fn compare_impl_const_raw(
let trait_const_item = tcx.associated_item(trait_const_item_def);
let impl_trait_ref =
tcx.impl_trait_ref(impl_const_item.container_id(tcx)).unwrap().instantiate_identity();

-debug!("compare_impl_const(impl_trait_ref={:?})", impl_trait_ref);
+debug!(?impl_trait_ref);

compare_number_of_generics(tcx, impl_const_item, trait_const_item, false)?;
compare_generic_param_kinds(tcx, impl_const_item, trait_const_item, false)?;

@@ -1722,6 +1698,7 @@ pub(super) fn compare_impl_const_raw(
/// The equivalent of [compare_method_predicate_entailment], but for associated constants
/// instead of associated functions.
+// FIXME(generic_const_items): If possible extract the common parts of `compare_{type,const}_predicate_entailment`.
#[instrument(level = "debug", skip(tcx))]
fn compare_const_predicate_entailment<'tcx>(
tcx: TyCtxt<'tcx>,
impl_ct: ty::AssocItem,
@@ -1736,13 +1713,14 @@ fn compare_const_predicate_entailment<'tcx>(
// because we shouldn't really have to deal with lifetimes or
// predicates. In fact some of this should probably be put into
// shared functions because of DRY violations...
-let impl_args = GenericArgs::identity_for_item(tcx, impl_ct.def_id);
-let trait_to_impl_args =
-impl_args.rebase_onto(tcx, impl_ct.container_id(tcx), impl_trait_ref.args);
+let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_ct.def_id).rebase_onto(
+tcx,
+impl_ct.container_id(tcx),
+impl_trait_ref.args,
+);

// Create a parameter environment that represents the implementation's
-// method.
-// Compute placeholder form of impl and trait const tys.
+// associated const.
let impl_ty = tcx.type_of(impl_ct_def_id).instantiate_identity();

let trait_ty = tcx.type_of(trait_ct.def_id).instantiate(tcx, trait_to_impl_args);
@@ -1759,14 +1737,14 @@ fn compare_const_predicate_entailment<'tcx>(
// The predicates declared by the impl definition, the trait and the
// associated const in the trait are assumed.
let impl_predicates = tcx.predicates_of(impl_ct_predicates.parent.unwrap());
-let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
-hybrid_preds.predicates.extend(
+let mut hybrid_preds = impl_predicates.instantiate_identity(tcx).predicates;
+hybrid_preds.extend(
trait_ct_predicates
.instantiate_own(tcx, trait_to_impl_args)
.map(|(predicate, _)| predicate),
);

-let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);
+let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(
tcx,
param_env,

@@ -1776,7 +1754,7 @@ fn compare_const_predicate_entailment<'tcx>(
let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);

-let impl_ct_own_bounds = impl_ct_predicates.instantiate_own(tcx, impl_args);
+let impl_ct_own_bounds = impl_ct_predicates.instantiate_own_identity();
for (predicate, span) in impl_ct_own_bounds {
let cause = ObligationCause::misc(span, impl_ct_def_id);
let predicate = ocx.normalize(&cause, param_env, predicate);
@@ -1787,20 +1765,15 @@ fn compare_const_predicate_entailment<'tcx>(

// There is no "body" here, so just pass dummy id.
let impl_ty = ocx.normalize(&cause, param_env, impl_ty);

-debug!("compare_const_impl: impl_ty={:?}", impl_ty);
+debug!(?impl_ty);

let trait_ty = ocx.normalize(&cause, param_env, trait_ty);

-debug!("compare_const_impl: trait_ty={:?}", trait_ty);
+debug!(?trait_ty);

let err = ocx.sup(&cause, param_env, trait_ty, impl_ty);

if let Err(terr) = err {
-debug!(
-"checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
-impl_ty, trait_ty
-);
+debug!(?impl_ty, ?trait_ty);

// Locate the Span containing just the type of the offending impl
let (ty, _) = tcx.hir().expect_impl_item(impl_ct_def_id).expect_const();
@@ -1845,14 +1818,13 @@ fn compare_const_predicate_entailment<'tcx>(
ocx.resolve_regions_and_report_errors(impl_ct_def_id, &outlives_env)
}

#[instrument(level = "debug", skip(tcx))]
pub(super) fn compare_impl_ty<'tcx>(
tcx: TyCtxt<'tcx>,
impl_ty: ty::AssocItem,
trait_ty: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) {
-debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref);

let _: Result<(), ErrorGuaranteed> = try {
compare_number_of_generics(tcx, impl_ty, trait_ty, false)?;
compare_generic_param_kinds(tcx, impl_ty, trait_ty, false)?;
@@ -1864,20 +1836,23 @@ pub(super) fn compare_impl_ty<'tcx>(

/// The equivalent of [compare_method_predicate_entailment], but for associated types
/// instead of associated functions.
+#[instrument(level = "debug", skip(tcx))]
fn compare_type_predicate_entailment<'tcx>(
tcx: TyCtxt<'tcx>,
impl_ty: ty::AssocItem,
trait_ty: ty::AssocItem,
impl_trait_ref: ty::TraitRef<'tcx>,
) -> Result<(), ErrorGuaranteed> {
-let impl_args = GenericArgs::identity_for_item(tcx, impl_ty.def_id);
-let trait_to_impl_args =
-impl_args.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.args);
+let trait_to_impl_args = GenericArgs::identity_for_item(tcx, impl_ty.def_id).rebase_onto(
+tcx,
+impl_ty.container_id(tcx),
+impl_trait_ref.args,
+);

let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);

-let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_args);
+let impl_ty_own_bounds = impl_ty_predicates.instantiate_own_identity();
if impl_ty_own_bounds.len() == 0 {
// Nothing to check.
return Ok(());
@@ -1887,29 +1862,29 @@ fn compare_type_predicate_entailment<'tcx>(
// `ObligationCause` (and the `FnCtxt`). This is what
// `regionck_item` expects.
let impl_ty_def_id = impl_ty.def_id.expect_local();
-debug!("compare_type_predicate_entailment: trait_to_impl_args={:?}", trait_to_impl_args);
+debug!(?trait_to_impl_args);

// The predicates declared by the impl definition, the trait and the
// associated type in the trait are assumed.
let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap());
-let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
-hybrid_preds.predicates.extend(
+let mut hybrid_preds = impl_predicates.instantiate_identity(tcx).predicates;
+hybrid_preds.extend(
trait_ty_predicates
.instantiate_own(tcx, trait_to_impl_args)
.map(|(predicate, _)| predicate),
);

-debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
+debug!(?hybrid_preds);

let impl_ty_span = tcx.def_span(impl_ty_def_id);
let normalize_cause = ObligationCause::misc(impl_ty_span, impl_ty_def_id);
-let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds.predicates), Reveal::UserFacing);

+let param_env = ty::ParamEnv::new(tcx.mk_clauses(&hybrid_preds), Reveal::UserFacing);
let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+debug!(caller_bounds=?param_env.caller_bounds());

let infcx = tcx.infer_ctxt().build();
let ocx = ObligationCtxt::new_with_diagnostics(&infcx);

-debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds());

for (predicate, span) in impl_ty_own_bounds {
let cause = ObligationCause::misc(span, impl_ty_def_id);
let predicate = ocx.normalize(&cause, param_env, predicate);
@@ -2009,11 +1984,11 @@ pub(super) fn check_type_bounds<'tcx>(
.explicit_item_bounds(trait_ty.def_id)
.iter_instantiated_copied(tcx, rebased_args)
.map(|(concrete_ty_bound, span)| {
-debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
+debug!(?concrete_ty_bound);
traits::Obligation::new(tcx, mk_cause(span), param_env, concrete_ty_bound)
})
.collect();
-debug!("check_type_bounds: item_bounds={:?}", obligations);
+debug!(item_bounds=?obligations);

// Normalize predicates with the assumption that the GAT may always normalize
// to its definition type. This should be the param-env we use to *prove* the

@@ -2032,7 +2007,7 @@ pub(super) fn check_type_bounds<'tcx>(
} else {
ocx.normalize(&normalize_cause, normalize_param_env, obligation.predicate)
};
-debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
+debug!(?normalized_predicate);
obligation.predicate = normalized_predicate;

ocx.register_obligation(obligation);
@@ -2457,7 +2457,7 @@ fn truncate_capture_for_optimization(
) -> (Place<'_>, ty::UpvarCapture) {
let is_shared_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Not));

-// Find the right-most deref (if any). All the projections that come after this
+// Find the rightmost deref (if any). All the projections that come after this
// are fields or other "in-place pointer adjustments"; these refer therefore to
// data owned by whatever pointer is being dereferenced here.
let idx = place.projections.iter().rposition(|proj| ProjectionKind::Deref == proj.kind);
@@ -395,7 +395,9 @@ impl<'tcx> GenericPredicates<'tcx> {
EarlyBinder::bind(self.predicates).iter_instantiated_copied(tcx, args)
}

-pub fn instantiate_own_identity(self) -> impl Iterator<Item = (Clause<'tcx>, Span)> {
+pub fn instantiate_own_identity(
+self,
+) -> impl Iterator<Item = (Clause<'tcx>, Span)> + DoubleEndedIterator + ExactSizeIterator {
EarlyBinder::bind(self.predicates).iter_identity_copied()
}
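What the extra bounds buy callers is easiest to see with an ordinary std iterator; a small, unrelated example:

    fn doubled(limit: u32) -> impl DoubleEndedIterator<Item = u32> + ExactSizeIterator {
        (0..limit).map(|i| i * 2)
    }

    fn main() {
        let it = doubled(4);
        assert_eq!(it.len(), 4);                                // ExactSizeIterator
        assert_eq!(it.rev().collect::<Vec<_>>(), [6, 4, 2, 0]); // DoubleEndedIterator
    }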
@@ -1048,8 +1048,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// | +------------|outer_scope cache|--+ |
// +------------------------------|middle_scope cache|------+
//
-// Now, a new, inner-most scope is added along with a new drop into
-// both inner-most and outer-most scopes:
+// Now, a new, innermost scope is added along with a new drop into
+// both innermost and outermost scopes:
//
// +------------------------------------------------------------+
// | +----------------------------------+ |

@@ -1061,11 +1061,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
// +----=----------------|invalid middle_scope cache|-----------+
//
// If, when adding `drop(new)` we do not invalidate the cached blocks for both
-// outer_scope and middle_scope, then, when building drops for the inner (right-most)
+// outer_scope and middle_scope, then, when building drops for the inner (rightmost)
// scope, the old, cached blocks, without `drop(new)` will get used, producing the
// wrong results.
//
-// Note that this code iterates scopes from the inner-most to the outer-most,
+// Note that this code iterates scopes from the innermost to the outermost,
// invalidating caches of each scope visited. This way bare minimum of the
// caches gets invalidated. i.e., if a new drop is added into the middle scope, the
// cache of outer scope stays intact.
@@ -1177,7 +1177,7 @@ struct PlaceInfo<'tcx> {
/// The projection used to go from parent to this node (only None for root).
proj_elem: Option<TrackElem>,

-/// The left-most child.
+/// The leftmost child.
first_child: Option<PlaceIndex>,

/// Index of the sibling to the right of this node.

@@ -85,7 +85,7 @@ pub(super) fn report_suspicious_mismatch_block(
}
}

-// Find the inner-most span candidate for final report
+// Find the innermost span candidate for final report
let candidate_span =
matched_spans.into_iter().rev().find(|&(_, same_ident)| !same_ident).map(|(span, _)| span);
@@ -1,6 +1,7 @@
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
+use crate::spec::abi::Abi as SpecAbi;

#[derive(Copy, Clone)]
enum RegPassKind {

@@ -359,3 +360,30 @@ where
);
}
}

+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+where
+Ty: TyAbiInterface<'a, C> + Copy,
+C: HasDataLayout + HasTargetSpec,
+{
+if abi == SpecAbi::RustIntrinsic {
+return;
+}
+
+let grlen = cx.data_layout().pointer_size.bits();
+
+for arg in fn_abi.args.iter_mut() {
+if arg.is_ignore() {
+continue;
+}
+
+// LLVM integers types do not differentiate between signed or unsigned integers.
+// Some LoongArch instructions do not have a `.w` suffix version, they use all the
+// GRLEN bits. By explicitly setting the `signext` or `zeroext` attribute
+// according to signedness to avoid unnecessary integer extending instructions.
+//
+// This is similar to the RISC-V case, see
+// https://github.com/rust-lang/rust/issues/114508 for details.
+extend_integer_width(arg, grlen);
+}
+}
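The decision the comment describes reduces to a small rule. Here is a standalone sketch with made-up types (not rustc_target's ArgAbi machinery), and simplified in that real targets also have special cases, such as 32-bit values on RV64 being sign-extended regardless of signedness:

    #[derive(Debug, PartialEq)]
    enum Extension { SignExt, ZeroExt, None }

    // Only integers narrower than a general-purpose register need an attribute;
    // the callee then receives a value already extended to GRLEN/XLEN bits.
    fn integer_extension(arg_bits: u64, signed: bool, grlen: u64) -> Extension {
        if arg_bits >= grlen {
            return Extension::None;
        }
        if signed { Extension::SignExt } else { Extension::ZeroExt }
    }

    fn main() {
        assert_eq!(integer_extension(32, true, 64), Extension::SignExt); // e.g. i32 argument
        assert_eq!(integer_extension(8, false, 64), Extension::ZeroExt); // e.g. u8 argument
        assert_eq!(integer_extension(64, true, 64), Extension::None);    // i64 already fills the register
    }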
@@ -1,11 +1,14 @@
-use std::fmt;
use std::str::FromStr;
+use std::{fmt, iter};

pub use rustc_abi::{Reg, RegKind};
use rustc_macros::HashStable_Generic;
use rustc_span::Symbol;

-use crate::abi::{self, Abi, Align, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+};
+use crate::spec::abi::Abi as SpecAbi;
use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};

mod aarch64;
@@ -720,6 +723,118 @@ impl<'a, Ty> FnAbi<'a, Ty> {
Ok(())
}

+pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: SpecAbi)
+where
+Ty: TyAbiInterface<'a, C> + Copy,
+C: HasDataLayout + HasTargetSpec,
+{
+let spec = cx.target_spec();
+match &spec.arch[..] {
+"x86" => x86::compute_rust_abi_info(cx, self, abi),
+"riscv32" | "riscv64" => riscv::compute_rust_abi_info(cx, self, abi),
+"loongarch64" => loongarch::compute_rust_abi_info(cx, self, abi),
+_ => {}
+};
+
+for (arg_idx, arg) in self
+.args
+.iter_mut()
+.enumerate()
+.map(|(idx, arg)| (Some(idx), arg))
+.chain(iter::once((None, &mut self.ret)))
+{
+if arg.is_ignore() {
+continue;
+}
+
+if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
+// Return values larger than 2 registers using a return area
+// pointer. LLVM and Cranelift disagree about how to return
+// values that don't fit in the registers designated for return
+// values. LLVM will force the entire return value to be passed
+// by return area pointer, while Cranelift will look at each IR level
+// return value independently and decide to pass it in a
+// register or not, which would result in the return value
+// being passed partially in registers and partially through a
+// return area pointer.
+//
+// While Cranelift may need to be fixed as the LLVM behavior is
+// generally more correct with respect to the surface language,
+// forcing this behavior in rustc itself makes it easier for
+// other backends to conform to the Rust ABI and for the C ABI
+// rustc already handles this behavior anyway.
+//
+// In addition LLVM's decision to pass the return value in
+// registers or using a return area pointer depends on how
+// exactly the return type is lowered to an LLVM IR type. For
+// example `Option<u128>` can be lowered as `{ i128, i128 }`
+// in which case the x86_64 backend would use a return area
+// pointer, or it could be passed as `{ i32, i128 }` in which
+// case the x86_64 backend would pass it in registers by taking
+// advantage of an LLVM ABI extension that allows using 3
+// registers for the x86_64 sysv call conv rather than the
+// officially specified 2 registers.
+//
+// FIXME: Technically we should look at the amount of available
+// return registers rather than guessing that there are 2
+// registers for return values. In practice only a couple of
+// architectures have less than 2 return registers. None of
+// which supported by Cranelift.
+//
+// NOTE: This adjustment is only necessary for the Rust ABI as
+// for other ABI's the calling convention implementations in
+// rustc_target already ensure any return value which doesn't
+// fit in the available amount of return registers is passed in
+// the right way for the current target.
+arg.make_indirect();
+continue;
+}
+
+match arg.layout.abi {
+Abi::Aggregate { .. } => {}
+
+// This is a fun case! The gist of what this is doing is
+// that we want callers and callees to always agree on the
+// ABI of how they pass SIMD arguments. If we were to *not*
+// make these arguments indirect then they'd be immediates
+// in LLVM, which means that they'd used whatever the
+// appropriate ABI is for the callee and the caller. That
+// means, for example, if the caller doesn't have AVX
+// enabled but the callee does, then passing an AVX argument
+// across this boundary would cause corrupt data to show up.
+//
+// This problem is fixed by unconditionally passing SIMD
+// arguments through memory between callers and callees
+// which should get them all to agree on ABI regardless of
+// target feature sets. Some more information about this
+// issue can be found in #44367.
+//
+// Note that the intrinsic ABI is exempt here as
+// that's how we connect up to LLVM and it's unstable
+// anyway, we control all calls to it in libstd.
+Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+arg.make_indirect();
+continue;
+}
+
+_ => continue,
+}
+// Compute `Aggregate` ABI.
+
+let is_indirect_not_on_stack =
+matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
+assert!(is_indirect_not_on_stack);
+
+let size = arg.layout.size;
+if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
+// We want to pass small aggregates as immediates, but using
+// an LLVM aggregate type for this leads to bad optimizations,
+// so we pick an appropriately sized integer type instead.
+arg.cast_to(Reg { kind: RegKind::Integer, size });
+}
+}
+}
}

impl FromStr for Conv {
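The first branch of that method comes down to a size comparison; a minimal sketch with hypothetical inputs (plain integers standing in for rustc's layout queries):

    // The Rust ABI forces anything larger than two registers to be returned
    // through a caller-provided return area pointer, so LLVM and Cranelift
    // cannot disagree about borderline cases.
    fn returns_via_return_area(ret_size_bytes: u64, ptr_size_bytes: u64) -> bool {
        ret_size_bytes > 2 * ptr_size_bytes
    }

    fn main() {
        // On a 64-bit target a 16-byte value still fits in two registers,
        // but a 24-byte value (for example a `(u64, u64, u64)`) does not.
        assert!(!returns_via_return_area(16, 8));
        assert!(returns_via_return_area(24, 8));
    }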
@@ -7,6 +7,7 @@
use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
use crate::spec::HasTargetSpec;
+use crate::spec::abi::Abi as SpecAbi;

#[derive(Copy, Clone)]
enum RegPassKind {

@@ -365,3 +366,29 @@ where
);
}
}

+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+where
+Ty: TyAbiInterface<'a, C> + Copy,
+C: HasDataLayout + HasTargetSpec,
+{
+if abi == SpecAbi::RustIntrinsic {
+return;
+}
+
+let xlen = cx.data_layout().pointer_size.bits();
+
+for arg in fn_abi.args.iter_mut() {
+if arg.is_ignore() {
+continue;
+}
+
+// LLVM integers types do not differentiate between signed or unsigned integers.
+// Some RISC-V instructions do not have a `.w` suffix version, they use all the
+// XLEN bits. By explicitly setting the `signext` or `zeroext` attribute
+// according to signedness to avoid unnecessary integer extending instructions.
+//
+// See https://github.com/rust-lang/rust/issues/114508 for details.
+extend_integer_width(arg, xlen);
+}
+}
@@ -1,6 +1,9 @@
use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
-use crate::abi::{Abi, Align, HasDataLayout, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+};
use crate::spec::HasTargetSpec;
+use crate::spec::abi::Abi as SpecAbi;

#[derive(PartialEq)]
pub(crate) enum Flavor {

@@ -207,3 +210,35 @@ pub(crate) fn fill_inregs<'a, Ty, C>(
}
}
}

+pub(crate) fn compute_rust_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, abi: SpecAbi)
+where
+Ty: TyAbiInterface<'a, C> + Copy,
+C: HasDataLayout + HasTargetSpec,
+{
+// Avoid returning floats in x87 registers on x86 as loading and storing from x87
+// registers will quiet signalling NaNs. Also avoid using SSE registers since they
+// are not always available (depending on target features).
+if !fn_abi.ret.is_ignore()
+// Intrinsics themselves are not actual "real" functions, so theres no need to change their ABIs.
+&& abi != SpecAbi::RustIntrinsic
+{
+let has_float = match fn_abi.ret.layout.abi {
+Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
+Abi::ScalarPair(s1, s2) => {
+matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
+}
+_ => false, // anyway not passed via registers on x86
+};
+if has_float {
+if fn_abi.ret.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
+// Same size or smaller than pointer, return in a register.
+fn_abi.ret.cast_to(Reg { kind: RegKind::Integer, size: fn_abi.ret.layout.size });
+} else {
+// Larger than a pointer, return indirectly.
+fn_abi.ret.make_indirect();
+}
+return;
+}
+}
+}
@@ -1,7 +1,7 @@
use std::iter;

-use rustc_abi::Primitive::{Float, Pointer};
-use rustc_abi::{Abi, AddressSpace, PointerKind, Scalar, Size};
+use rustc_abi::Primitive::Pointer;
+use rustc_abi::{Abi, PointerKind, Scalar, Size};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::bug;

@@ -13,8 +13,7 @@ use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::def_id::DefId;
use rustc_target::abi::call::{
-ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
-RiscvInterruptKind,
+ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, RiscvInterruptKind,
};
use rustc_target::spec::abi::Abi as SpecAbi;
use tracing::debug;
@@ -678,6 +677,8 @@ fn fn_abi_adjust_for_abi<'tcx>(
let tcx = cx.tcx();

if abi == SpecAbi::Rust || abi == SpecAbi::RustCall || abi == SpecAbi::RustIntrinsic {
+fn_abi.adjust_for_rust_abi(cx, abi);
+
// Look up the deduced parameter attributes for this function, if we have its def ID and
// we're optimizing in non-incremental mode. We'll tag its parameters with those attributes
// as appropriate.
@@ -688,125 +689,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
&[]
};

-let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>, arg_idx: Option<usize>| {
+for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
if arg.is_ignore() {
-return;
-}
-
-// Avoid returning floats in x87 registers on x86 as loading and storing from x87
-// registers will quiet signalling NaNs. Also avoid using SSE registers since they
-// are not always available (depending on target features).
-if tcx.sess.target.arch == "x86"
-&& arg_idx.is_none()
-// Intrinsics themselves are not actual "real" functions, so theres no need to
-// change their ABIs.
-&& abi != SpecAbi::RustIntrinsic
-{
-let has_float = match arg.layout.abi {
-Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
-Abi::ScalarPair(s1, s2) => {
-matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
-}
-_ => false, // anyway not passed via registers on x86
-};
-if has_float {
-if arg.layout.size <= Pointer(AddressSpace::DATA).size(cx) {
-// Same size or smaller than pointer, return in a register.
-arg.cast_to(Reg { kind: RegKind::Integer, size: arg.layout.size });
-} else {
-// Larger than a pointer, return indirectly.
-arg.make_indirect();
-}
-return;
-}
-}
-
-if arg_idx.is_none() && arg.layout.size > Pointer(AddressSpace::DATA).size(cx) * 2 {
-// Return values larger than 2 registers using a return area
-// pointer. LLVM and Cranelift disagree about how to return
-// values that don't fit in the registers designated for return
-// values. LLVM will force the entire return value to be passed
-// by return area pointer, while Cranelift will look at each IR level
-// return value independently and decide to pass it in a
-// register or not, which would result in the return value
-// being passed partially in registers and partially through a
-// return area pointer.
-//
-// While Cranelift may need to be fixed as the LLVM behavior is
-// generally more correct with respect to the surface language,
-// forcing this behavior in rustc itself makes it easier for
-// other backends to conform to the Rust ABI and for the C ABI
-// rustc already handles this behavior anyway.
-//
-// In addition LLVM's decision to pass the return value in
-// registers or using a return area pointer depends on how
-// exactly the return type is lowered to an LLVM IR type. For
-// example `Option<u128>` can be lowered as `{ i128, i128 }`
-// in which case the x86_64 backend would use a return area
-// pointer, or it could be passed as `{ i32, i128 }` in which
-// case the x86_64 backend would pass it in registers by taking
-// advantage of an LLVM ABI extension that allows using 3
-// registers for the x86_64 sysv call conv rather than the
-// officially specified 2 registers.
-//
-// FIXME: Technically we should look at the amount of available
-// return registers rather than guessing that there are 2
-// registers for return values. In practice only a couple of
-// architectures have less than 2 return registers. None of
-// which supported by Cranelift.
-//
-// NOTE: This adjustment is only necessary for the Rust ABI as
-// for other ABI's the calling convention implementations in
-// rustc_target already ensure any return value which doesn't
-// fit in the available amount of return registers is passed in
-// the right way for the current target.
-arg.make_indirect();
-return;
-}
-
-match arg.layout.abi {
-Abi::Aggregate { .. } => {}
-
-// This is a fun case! The gist of what this is doing is
-// that we want callers and callees to always agree on the
-// ABI of how they pass SIMD arguments. If we were to *not*
-// make these arguments indirect then they'd be immediates
-// in LLVM, which means that they'd used whatever the
-// appropriate ABI is for the callee and the caller. That
-// means, for example, if the caller doesn't have AVX
-// enabled but the callee does, then passing an AVX argument
-// across this boundary would cause corrupt data to show up.
-//
-// This problem is fixed by unconditionally passing SIMD
-// arguments through memory between callers and callees
-// which should get them all to agree on ABI regardless of
-// target feature sets. Some more information about this
-// issue can be found in #44367.
-//
-// Note that the intrinsic ABI is exempt here as
-// that's how we connect up to LLVM and it's unstable
-// anyway, we control all calls to it in libstd.
-Abi::Vector { .. }
-if abi != SpecAbi::RustIntrinsic && tcx.sess.target.simd_types_indirect =>
-{
-arg.make_indirect();
-return;
-}
-
-_ => return,
-}
-// Compute `Aggregate` ABI.
-
-let is_indirect_not_on_stack =
-matches!(arg.mode, PassMode::Indirect { on_stack: false, .. });
-assert!(is_indirect_not_on_stack, "{:?}", arg);
-
-let size = arg.layout.size;
-if !arg.layout.is_unsized() && size <= Pointer(AddressSpace::DATA).size(cx) {
-// We want to pass small aggregates as immediates, but using
-// an LLVM aggregate type for this leads to bad optimizations,
-// so we pick an appropriately sized integer type instead.
-arg.cast_to(Reg { kind: RegKind::Integer, size });
+continue;
}

// If we deduced that this parameter was read-only, add that to the attribute list now.
@@ -814,9 +699,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
// The `readonly` parameter only applies to pointers, so we can only do this if the
// argument was passed indirectly. (If the argument is passed directly, it's an SSA
// value, so it's implicitly immutable.)
-if let (Some(arg_idx), &mut PassMode::Indirect { ref mut attrs, .. }) =
-(arg_idx, &mut arg.mode)
-{
+if let &mut PassMode::Indirect { ref mut attrs, .. } = &mut arg.mode {
// The `deduced_param_attrs` list could be empty if this is a type of function
// we can't deduce any parameters for, so make sure the argument index is in
// bounds.

@@ -827,11 +710,6 @@ fn fn_abi_adjust_for_abi<'tcx>(
}
}
}
-};
-
-fixup(&mut fn_abi.ret, None);
-for (arg_idx, arg) in fn_abi.args.iter_mut().enumerate() {
-fixup(arg, Some(arg_idx));
-}
} else {
fn_abi
@@ -496,8 +496,8 @@ where

/// Similar to [`instantiate_identity`](EarlyBinder::instantiate_identity),
/// but on an iterator of values that deref to a `TypeFoldable`.
-pub fn iter_identity_copied(self) -> impl Iterator<Item = <Iter::Item as Deref>::Target> {
-self.value.into_iter().map(|v| *v)
+pub fn iter_identity_copied(self) -> IterIdentityCopied<Iter> {
+IterIdentityCopied { it: self.value.into_iter() }
}
}
@@ -546,6 +546,44 @@ where
{
}

+pub struct IterIdentityCopied<Iter: IntoIterator> {
+it: Iter::IntoIter,
+}
+
+impl<Iter: IntoIterator> Iterator for IterIdentityCopied<Iter>
+where
+Iter::Item: Deref,
+<Iter::Item as Deref>::Target: Copy,
+{
+type Item = <Iter::Item as Deref>::Target;
+
+fn next(&mut self) -> Option<Self::Item> {
+self.it.next().map(|i| *i)
+}
+
+fn size_hint(&self) -> (usize, Option<usize>) {
+self.it.size_hint()
+}
+}
+
+impl<Iter: IntoIterator> DoubleEndedIterator for IterIdentityCopied<Iter>
+where
+Iter::IntoIter: DoubleEndedIterator,
+Iter::Item: Deref,
+<Iter::Item as Deref>::Target: Copy,
+{
+fn next_back(&mut self) -> Option<Self::Item> {
+self.it.next_back().map(|i| *i)
+}
+}
+
+impl<Iter: IntoIterator> ExactSizeIterator for IterIdentityCopied<Iter>
+where
+Iter::IntoIter: ExactSizeIterator,
+Iter::Item: Deref,
+<Iter::Item as Deref>::Target: Copy,
+{
+}
pub struct EarlyBinderIter<I, T> {
t: T,
_tcx: PhantomData<I>,
@@ -79,7 +79,7 @@ impl<K, V> Root<K, V> {
}
open_node.push(key, value, right_tree);

-// Go down to the right-most leaf again.
+// Go down to the rightmost leaf again.
cur_node = open_node.forget_type().last_leaf_edge().into_node();
}

@@ -102,7 +102,7 @@ impl<K, V> Root<K, V> {
pub fn fix_right_border_of_plentiful(&mut self) {
let mut cur_node = self.borrow_mut();
while let Internal(internal) = cur_node.force() {
-// Check if right-most child is underfull.
+// Check if rightmost child is underfull.
let mut last_kv = internal.last_kv().consider_for_balancing();
debug_assert!(last_kv.left_child_len() >= MIN_LEN * 2);
let right_child_len = last_kv.right_child_len();
@@ -1521,7 +1521,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> {
right_node.val_area_mut(..count - 1),
);

-// Move the left-most stolen pair to the parent.
+// Move the leftmost stolen pair to the parent.
let k = left_node.key_area_mut(new_left_len).assume_init_read();
let v = left_node.val_area_mut(new_left_len).assume_init_read();
let (k, v) = self.parent.replace_kv(k, v);

@@ -1570,7 +1570,7 @@ impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> {

// Move leaf data.
{
-// Move the right-most stolen pair to the parent.
+// Move the rightmost stolen pair to the parent.
let k = right_node.key_area_mut(count - 1).assume_init_read();
let v = right_node.val_area_mut(count - 1).assume_init_read();
let (k, v) = self.parent.replace_kv(k, v);
@@ -71,7 +71,7 @@ pub use self::{
/// this can be useful for specializing [`FromIterator`] implementations or recovering the
/// remaining elements after an iterator has been partially exhausted.
///
-/// Note that implementations do not necessarily have to provide access to the inner-most
+/// Note that implementations do not necessarily have to provide access to the innermost
/// source of a pipeline. A stateful intermediate adapter might eagerly evaluate a part
/// of the pipeline and expose its internal storage as source.
///
@@ -704,7 +704,7 @@ impl<T: ?Sized> *const T {
/// but it provides slightly more information to the optimizer, which can
/// sometimes allow it to optimize slightly better with some backends.
///
-/// This method can be though of as recovering the `count` that was passed
+/// This method can be thought of as recovering the `count` that was passed
/// to [`add`](#method.add) (or, with the parameters in the other order,
/// to [`sub`](#method.sub)). The following are all equivalent, assuming
/// that their safety preconditions are met:
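The relationship the corrected sentence describes can be checked on stable Rust with `offset_from`, used here as the stable counterpart (`sub_ptr` itself is assumed to still be nightly-only):

    fn main() {
        let data = [10u32, 20, 30, 40];
        let base = data.as_ptr();
        // SAFETY: `base.add(2)` stays within the bounds of `data`.
        let third = unsafe { base.add(2) };
        // SAFETY: both pointers are derived from the same allocation, `third >= base`.
        let count = unsafe { third.offset_from(base) };
        assert_eq!(count, 2); // exactly the `count` that was passed to `add`
    }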
@@ -602,7 +602,7 @@ pub const fn without_provenance<T>(addr: usize) -> *const T {
unsafe { mem::transmute(addr) }
}

-/// Creates a new pointer that is dangling, but well-aligned.
+/// Creates a new pointer that is dangling, but non-null and well-aligned.
///
/// This is useful for initializing types which lazily allocate, like
/// `Vec::new` does.

@@ -645,7 +645,7 @@ pub const fn without_provenance_mut<T>(addr: usize) -> *mut T {
unsafe { mem::transmute(addr) }
}

-/// Creates a new pointer that is dangling, but well-aligned.
+/// Creates a new pointer that is dangling, but non-null and well-aligned.
///
/// This is useful for initializing types which lazily allocate, like
/// `Vec::new` does.
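The "lazily allocate" pattern those docs refer to, sketched with `NonNull::dangling` (illustrative only, not `Vec`'s real implementation):

    use std::ptr::NonNull;

    struct Lazy<T> {
        ptr: NonNull<T>, // dangling until the first element is pushed
        cap: usize,
    }

    impl<T> Lazy<T> {
        const fn new() -> Self {
            // `NonNull::dangling()` is non-null and aligned for `T`, but must
            // never be read or written while `cap == 0`.
            Lazy { ptr: NonNull::dangling(), cap: 0 }
        }
    }

    fn main() {
        let v: Lazy<u64> = Lazy::new(); // no heap allocation yet
        assert_eq!(v.cap, 0);
        assert_eq!(v.ptr.as_ptr() as usize % std::mem::align_of::<u64>(), 0);
    }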
@@ -867,7 +867,7 @@ impl<T: ?Sized> *mut T {
/// but it provides slightly more information to the optimizer, which can
/// sometimes allow it to optimize slightly better with some backends.
///
-/// This method can be though of as recovering the `count` that was passed
+/// This method can be thought of as recovering the `count` that was passed
/// to [`add`](#method.add) (or, with the parameters in the other order,
/// to [`sub`](#method.sub)). The following are all equivalent, assuming
/// that their safety preconditions are met:
@@ -107,9 +107,7 @@ impl<T: Sized> NonNull<T> {
#[must_use]
#[inline]
pub const fn dangling() -> Self {
-// SAFETY: mem::align_of() returns a non-zero usize which is then casted
-// to a *mut T. Therefore, `ptr` is not null and the conditions for
-// calling new_unchecked() are respected.
+// SAFETY: ptr::dangling_mut() returns a non-null well-aligned pointer.
unsafe {
let ptr = crate::ptr::dangling_mut::<T>();
NonNull::new_unchecked(ptr)
@@ -257,3 +257,29 @@ impl SliceContains for i8 {
memchr::memchr(byte, bytes).is_some()
}
}

+macro_rules! impl_slice_contains {
+($($t:ty),*) => {
+$(
+impl SliceContains for $t {
+#[inline]
+fn slice_contains(&self, arr: &[$t]) -> bool {
+// Make our LANE_COUNT 4x the normal lane count (aiming for 128 bit vectors).
+// The compiler will nicely unroll it.
+const LANE_COUNT: usize = 4 * (128 / (mem::size_of::<$t>() * 8));
+// SIMD
+let mut chunks = arr.chunks_exact(LANE_COUNT);
+for chunk in &mut chunks {
+if chunk.iter().fold(false, |acc, x| acc | (*x == *self)) {
+return true;
+}
+}
+// Scalar remainder
+return chunks.remainder().iter().any(|x| *x == *self);
+}
+}
+)*
+};
+}
+
+impl_slice_contains!(u16, u32, u64, i16, i32, i64, f32, f64, usize, isize);
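A standalone version of the same chunked scan for a plain `u32` slice, shown only to make the shape of the optimization concrete (the lane constant and structure are simplified relative to the generated std code):

    fn contains_u32(needle: u32, haystack: &[u32]) -> bool {
        // 4 lanes of 32 bits roughly fill one 128-bit vector; the branch-free
        // fold over each chunk lets the compiler unroll and vectorize it.
        const LANE_COUNT: usize = 4;
        let mut chunks = haystack.chunks_exact(LANE_COUNT);
        for chunk in &mut chunks {
            if chunk.iter().fold(false, |acc, &x| acc | (x == needle)) {
                return true;
            }
        }
        // Scalar remainder
        chunks.remainder().iter().any(|&x| x == needle)
    }

    fn main() {
        let data: Vec<u32> = (0..1000).collect();
        assert!(contains_u32(999, &data));
        assert!(!contains_u32(1000, &data));
    }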
@@ -453,6 +453,29 @@ impl<A: Allocator> Read for VecDeque<u8, A> {
Ok(n)
}

+fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
+let (front, back) = self.as_slices();
+
+// Use only the front buffer if it is big enough to fill `buf`, else use
+// the back buffer too.
+match buf.split_at_mut_checked(front.len()) {
+None => buf.copy_from_slice(&front[..buf.len()]),
+Some((buf_front, buf_back)) => match back.split_at_checked(buf_back.len()) {
+Some((back, _)) => {
+buf_front.copy_from_slice(front);
+buf_back.copy_from_slice(back);
+}
+None => {
+self.clear();
+return Err(io::Error::READ_EXACT_EOF);
+}
+},
+}
+
+self.drain(..buf.len());
+Ok(())
+}
+
#[inline]
fn read_buf(&mut self, cursor: BorrowedCursor<'_>) -> io::Result<()> {
let (ref mut front, _) = self.as_slices();

@@ -462,6 +485,29 @@ impl<A: Allocator> Read for VecDeque<u8, A> {
Ok(())
}

+fn read_buf_exact(&mut self, mut cursor: BorrowedCursor<'_>) -> io::Result<()> {
+let len = cursor.capacity();
+let (front, back) = self.as_slices();
+
+match front.split_at_checked(cursor.capacity()) {
+Some((front, _)) => cursor.append(front),
+None => {
+cursor.append(front);
+match back.split_at_checked(cursor.capacity()) {
+Some((back, _)) => cursor.append(back),
+None => {
+cursor.append(back);
+self.clear();
+return Err(io::Error::READ_EXACT_EOF);
+}
+}
+}
+}
+
+self.drain(..len);
+Ok(())
+}
+
#[inline]
fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
// The total len is known upfront so we can reserve it in a single call.
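A short usage sketch from the caller's side, using only stable std APIs; the specialized paths above kick in transparently whenever `read_exact` is called on a `VecDeque<u8>`:

    use std::collections::VecDeque;
    use std::io::Read;

    fn main() -> std::io::Result<()> {
        let mut queue: VecDeque<u8> = VecDeque::from(vec![1, 2, 3, 4, 5]);
        // Rotating makes it likely that the contents span the front and back slices.
        queue.rotate_left(3);

        let mut buf = [0u8; 4];
        queue.read_exact(&mut buf)?;
        assert_eq!(buf, [4, 5, 1, 2]);
        assert_eq!(queue.len(), 1); // the read bytes were consumed
        Ok(())
    }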
@@ -1208,7 +1208,7 @@ The compiler has some latitude in how an entity is encoded as long as the symbol

* Named functions, methods, and statics shall be represented by a *[path]* production.

-* Paths should be rooted at the inner-most entity that can act as a path root.
+* Paths should be rooted at the innermost entity that can act as a path root.
  Roots can be crate-ids, inherent impls, trait impls, and (for items within default methods) trait definitions.

* The compiler is free to choose disambiguation indices and namespace tags from
@ -112,7 +112,7 @@ pub(crate) fn write_shared(
        md_opts.output = cx.dst.clone();
        md_opts.external_html = cx.shared.layout.external_html.clone();
        try_err!(
            crate::markdown::render(&index_page, md_opts, cx.shared.edition()),
            crate::markdown::render_and_write(&index_page, md_opts, cx.shared.edition()),
            &index_page
        );
    }

@ -817,7 +817,7 @@ fn main_args(
        return wrap_return(
            dcx,
            interface::run_compiler(config, |_compiler| {
                markdown::render(&md_input, render_options, edition)
                markdown::render_and_write(&md_input, render_options, edition)
            }),
        );
    }
@ -1,3 +1,13 @@
//! Standalone markdown rendering.
//!
//! For the (much more common) case of rendering markdown in doc-comments, see
//! [crate::html::markdown].
//!
//! This is used when [rendering a markdown file to an html file][docs], without processing
//! rust source code.
//!
//! [docs]: https://doc.rust-lang.org/stable/rustdoc/#using-standalone-markdown-files

use std::fmt::Write as _;
use std::fs::{File, create_dir_all, read_to_string};
use std::io::prelude::*;
@ -33,7 +43,7 @@ fn extract_leading_metadata(s: &str) -> (Vec<&str>, &str) {
/// (e.g., output = "bar" => "bar/foo.html").
///
/// Requires session globals to be available, for symbol interning.
pub(crate) fn render<P: AsRef<Path>>(
pub(crate) fn render_and_write<P: AsRef<Path>>(
    input: P,
    options: RenderOptions,
    edition: Edition,

tests/assembly/rust-abi-arg-attr.rs (new file, 108 lines)
@ -0,0 +1,108 @@
//@ assembly-output: emit-asm
//@ revisions: riscv64 riscv64-zbb loongarch64
//@ compile-flags: -C opt-level=3
//@ [riscv64] compile-flags: --target riscv64gc-unknown-linux-gnu
//@ [riscv64] needs-llvm-components: riscv
//@ [riscv64-zbb] compile-flags: --target riscv64gc-unknown-linux-gnu
//@ [riscv64-zbb] compile-flags: -C target-feature=+zbb
//@ [riscv64-zbb] needs-llvm-components: riscv
//@ [loongarch64] compile-flags: --target loongarch64-unknown-linux-gnu
//@ [loongarch64] needs-llvm-components: loongarch

#![feature(no_core, lang_items, intrinsics, rustc_attrs)]
#![crate_type = "lib"]
#![no_std]
#![no_core]

// FIXME: Migrate these code after PR #130693 is landed.
// vvvvv core

#[lang = "sized"]
trait Sized {}

#[lang = "copy"]
trait Copy {}

impl Copy for i8 {}
impl Copy for u32 {}
impl Copy for i32 {}

#[lang = "neg"]
trait Neg {
    type Output;

    fn neg(self) -> Self::Output;
}

impl Neg for i8 {
    type Output = i8;

    fn neg(self) -> Self::Output {
        -self
    }
}

#[lang = "Ordering"]
#[repr(i8)]
enum Ordering {
    Less = -1,
    Equal = 0,
    Greater = 1,
}

extern "rust-intrinsic" {
    #[rustc_safe_intrinsic]
    fn three_way_compare<T: Copy>(lhs: T, rhs: T) -> Ordering;
}

// ^^^^^ core

// Reimplementation of function `{integer}::max`.
macro_rules! max {
    ($a:expr, $b:expr) => {
        match three_way_compare($a, $b) {
            Ordering::Less | Ordering::Equal => $b,
            Ordering::Greater => $a,
        }
    };
}

#[no_mangle]
// CHECK-LABEL: issue_114508_u32:
pub fn issue_114508_u32(a: u32, b: u32) -> u32 {
    // CHECK-NEXT: .cfi_startproc

    // riscv64-NEXT: bltu a1, a0, .[[RET:.+]]
    // riscv64-NEXT: mv a0, a1
    // riscv64-NEXT: .[[RET]]:

    // riscv64-zbb-NEXT: maxu a0, a0, a1

    // loongarch64-NEXT: sltu $a2, $a1, $a0
    // loongarch64-NEXT: masknez $a1, $a1, $a2
    // loongarch64-NEXT: maskeqz $a0, $a0, $a2
    // loongarch64-NEXT: or $a0, $a0, $a1

    // CHECK-NEXT: ret
    max!(a, b)
}

#[no_mangle]
// CHECK-LABEL: issue_114508_i32:
pub fn issue_114508_i32(a: i32, b: i32) -> i32 {
    // CHECK-NEXT: .cfi_startproc

    // riscv64-NEXT: blt a1, a0, .[[RET:.+]]
    // riscv64-NEXT: mv a0, a1
    // riscv64-NEXT: .[[RET]]:

    // riscv64-zbb-NEXT: max a0, a0, a1

    // loongarch64-NEXT: slt $a2, $a1, $a0
    // loongarch64-NEXT: masknez $a1, $a1, $a2
    // loongarch64-NEXT: maskeqz $a0, $a0, $a2
    // loongarch64-NEXT: or $a0, $a0, $a1

    // CHECK-NEXT: ret
    max!(a, b)
}
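The test hand-rolls `max` through the `three_way_compare` intrinsic only because a `#![no_core]` crate cannot link the standard library. A rough stable-Rust equivalent of the two checked functions (not part of the patch; on riscv64gc with `-C opt-level=3` and `+zbb` it is expected to lower to the same `maxu`/`max` instructions, though that is an assumption, not something this diff states):

#[no_mangle]
pub fn max_u32(a: u32, b: u32) -> u32 {
    // Ord::max on unsigned integers; the FileCheck pattern above expects `maxu` with Zbb.
    a.max(b)
}

#[no_mangle]
pub fn max_i32(a: i32, b: i32) -> i32 {
    // Signed variant; expects `max` with Zbb.
    a.max(b)
}

fn main() {
    assert_eq!(max_u32(1, 2), 2);
    assert_eq!(max_i32(-5, -2), -2);
}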
@ -5,7 +5,7 @@
// Ensure that when val < base, we do not divide or multiply.

// CHECK-LABEL: @checked_ilog
// CHECK-SAME: (i16 noundef %val, i16 noundef %base)
// CHECK-SAME: (i16{{.*}} %val, i16{{.*}} %base)
#[no_mangle]
pub fn checked_ilog(val: u16, base: u16) -> Option<u32> {
    // CHECK-NOT: udiv

@ -8,7 +8,7 @@
// Thanks to poison semantics, this doesn't even need branches.

// CHECK-LABEL: @checked_sub_unsigned
// CHECK-SAME: (i16 noundef %a, i16 noundef %b)
// CHECK-SAME: (i16{{.*}} %a, i16{{.*}} %b)
#[no_mangle]
pub fn checked_sub_unsigned(a: u16, b: u16) -> Option<u16> {
    // CHECK-DAG: %[[IS_SOME:.+]] = icmp uge i16 %a, %b
@ -26,7 +26,7 @@ pub fn checked_sub_unsigned(a: u16, b: u16) -> Option<u16> {
// looking for no-wrap flags, we just need there to not be any masking.

// CHECK-LABEL: @checked_shl_unsigned
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
// CHECK-SAME: (i32{{.*}} %a, i32{{.*}} %b)
#[no_mangle]
pub fn checked_shl_unsigned(a: u32, b: u32) -> Option<u32> {
    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
@ -41,7 +41,7 @@ pub fn checked_shl_unsigned(a: u32, b: u32) -> Option<u32> {
}

// CHECK-LABEL: @checked_shr_unsigned
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
// CHECK-SAME: (i32{{.*}} %a, i32{{.*}} %b)
#[no_mangle]
pub fn checked_shr_unsigned(a: u32, b: u32) -> Option<u32> {
    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
@ -56,7 +56,7 @@ pub fn checked_shr_unsigned(a: u32, b: u32) -> Option<u32> {
}

// CHECK-LABEL: @checked_shl_signed
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
// CHECK-SAME: (i32{{.*}} %a, i32{{.*}} %b)
#[no_mangle]
pub fn checked_shl_signed(a: i32, b: u32) -> Option<i32> {
    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
@ -71,7 +71,7 @@ pub fn checked_shl_signed(a: i32, b: u32) -> Option<i32> {
}

// CHECK-LABEL: @checked_shr_signed
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
// CHECK-SAME: (i32{{.*}} %a, i32{{.*}} %b)
#[no_mangle]
pub fn checked_shr_signed(a: i32, b: u32) -> Option<i32> {
    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
@ -86,7 +86,7 @@ pub fn checked_shr_signed(a: i32, b: u32) -> Option<i32> {
}

// CHECK-LABEL: @checked_add_one_unwrap_unsigned
// CHECK-SAME: (i32 noundef %x)
// CHECK-SAME: (i32{{.*}} %x)
#[no_mangle]
pub fn checked_add_one_unwrap_unsigned(x: u32) -> u32 {
    // CHECK: %[[IS_MAX:.+]] = icmp eq i32 %x, -1
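These hunks only relax the FileCheck patterns (the `noundef` attribute may now be accompanied by `signext`/`zeroext` on some targets); the library behaviour they pin down is unchanged. For reference, the contracts being exercised, shown with plain stable calls:

fn main() {
    // checked_ilog: val < base answers 0 with no division or multiplication,
    // and degenerate inputs return None instead of panicking.
    assert_eq!(7u16.checked_ilog(10), Some(0));
    assert_eq!(100u16.checked_ilog(10), Some(2));
    assert_eq!(0u16.checked_ilog(10), None);

    // checked_sub / checked_shl: None exactly when the operation would wrap
    // or the shift amount is out of range.
    assert_eq!(3u16.checked_sub(5), None);
    assert_eq!(1u32.checked_shl(31), Some(1 << 31));
    assert_eq!(1u32.checked_shl(32), None);
}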
@ -12,7 +12,7 @@ use std::cmp::Ordering;
pub struct Foo(u16);

// CHECK-LABEL: @check_lt
// CHECK-SAME: (i16 noundef %[[A:.+]], i16 noundef %[[B:.+]])
// CHECK-SAME: (i16{{.*}} %[[A:.+]], i16{{.*}} %[[B:.+]])
#[no_mangle]
pub fn check_lt(a: Foo, b: Foo) -> bool {
    // CHECK: %[[R:.+]] = icmp ult i16 %[[A]], %[[B]]
@ -21,7 +21,7 @@ pub fn check_lt(a: Foo, b: Foo) -> bool {
}

// CHECK-LABEL: @check_le
// CHECK-SAME: (i16 noundef %[[A:.+]], i16 noundef %[[B:.+]])
// CHECK-SAME: (i16{{.*}} %[[A:.+]], i16{{.*}} %[[B:.+]])
#[no_mangle]
pub fn check_le(a: Foo, b: Foo) -> bool {
    // CHECK: %[[R:.+]] = icmp ule i16 %[[A]], %[[B]]
@ -30,7 +30,7 @@ pub fn check_le(a: Foo, b: Foo) -> bool {
}

// CHECK-LABEL: @check_gt
// CHECK-SAME: (i16 noundef %[[A:.+]], i16 noundef %[[B:.+]])
// CHECK-SAME: (i16{{.*}} %[[A:.+]], i16{{.*}} %[[B:.+]])
#[no_mangle]
pub fn check_gt(a: Foo, b: Foo) -> bool {
    // CHECK: %[[R:.+]] = icmp ugt i16 %[[A]], %[[B]]
@ -39,7 +39,7 @@ pub fn check_gt(a: Foo, b: Foo) -> bool {
}

// CHECK-LABEL: @check_ge
// CHECK-SAME: (i16 noundef %[[A:.+]], i16 noundef %[[B:.+]])
// CHECK-SAME: (i16{{.*}} %[[A:.+]], i16{{.*}} %[[B:.+]])
#[no_mangle]
pub fn check_ge(a: Foo, b: Foo) -> bool {
    // CHECK: %[[R:.+]] = icmp uge i16 %[[A]], %[[B]]
@ -6,11 +6,11 @@

#[no_mangle]
pub fn sum(x: u32, y: u32) -> u32 {
    // YES-LABEL: define{{.*}}i32 @sum(i32 noundef %0, i32 noundef %1)
    // YES-LABEL: define{{.*}}i32 @sum(i32{{.*}} %0, i32{{.*}} %1)
    // YES-NEXT: %3 = add i32 %1, %0
    // YES-NEXT: ret i32 %3

    // NO-LABEL: define{{.*}}i32 @sum(i32 noundef %x, i32 noundef %y)
    // NO-LABEL: define{{.*}}i32 @sum(i32{{.*}} %x, i32{{.*}} %y)
    // NO-NEXT: start:
    // NO-NEXT: %z = add i32 %y, %x
    // NO-NEXT: ret i32 %z
@ -32,7 +32,7 @@ pub fn boolean(x: bool) -> bool {
    x
}

// CHECK: i8 @maybeuninit_boolean(i8 %x)
// CHECK: i8 @maybeuninit_boolean(i8{{.*}} %x)
#[no_mangle]
pub fn maybeuninit_boolean(x: MaybeUninit<bool>) -> MaybeUninit<bool> {
    x
@ -44,19 +44,19 @@ pub fn enum_bool(x: MyBool) -> MyBool {
    x
}

// CHECK: i8 @maybeuninit_enum_bool(i8 %x)
// CHECK: i8 @maybeuninit_enum_bool(i8{{.*}} %x)
#[no_mangle]
pub fn maybeuninit_enum_bool(x: MaybeUninit<MyBool>) -> MaybeUninit<MyBool> {
    x
}

// CHECK: noundef{{( range\(i32 0, 1114112\))?}} i32 @char(i32 noundef{{( range\(i32 0, 1114112\))?}} %x)
// CHECK: noundef{{( range\(i32 0, 1114112\))?}} i32 @char(i32{{.*}}{{( range\(i32 0, 1114112\))?}} %x)
#[no_mangle]
pub fn char(x: char) -> char {
    x
}

// CHECK: i32 @maybeuninit_char(i32 %x)
// CHECK: i32 @maybeuninit_char(i32{{.*}} %x)
#[no_mangle]
pub fn maybeuninit_char(x: MaybeUninit<char>) -> MaybeUninit<char> {
    x
@ -10,8 +10,7 @@ use std::intrinsics::three_way_compare;

#[no_mangle]
// CHECK-LABEL: @signed_cmp
// DEBUG-SAME: (i16 %a, i16 %b)
// OPTIM-SAME: (i16 noundef %a, i16 noundef %b)
// CHECK-SAME: (i16{{.*}} %a, i16{{.*}} %b)
pub fn signed_cmp(a: i16, b: i16) -> std::cmp::Ordering {
    // DEBUG: %[[GT:.+]] = icmp sgt i16 %a, %b
    // DEBUG: %[[ZGT:.+]] = zext i1 %[[GT]] to i8
@ -29,8 +28,7 @@ pub fn signed_cmp(a: i16, b: i16) -> std::cmp::Ordering {

#[no_mangle]
// CHECK-LABEL: @unsigned_cmp
// DEBUG-SAME: (i16 %a, i16 %b)
// OPTIM-SAME: (i16 noundef %a, i16 noundef %b)
// CHECK-SAME: (i16{{.*}} %a, i16{{.*}} %b)
pub fn unsigned_cmp(a: u16, b: u16) -> std::cmp::Ordering {
    // DEBUG: %[[GT:.+]] = icmp ugt i16 %a, %b
    // DEBUG: %[[ZGT:.+]] = zext i1 %[[GT]] to i8
@ -9,7 +9,7 @@
#[repr(transparent)]
pub struct Transparent32(u32);

// CHECK: i32 @make_transparent(i32 noundef %x)
// CHECK: i32 @make_transparent(i32{{.*}} %x)
#[no_mangle]
pub fn make_transparent(x: u32) -> Transparent32 {
    // CHECK-NOT: alloca
@ -18,7 +18,7 @@ pub fn make_transparent(x: u32) -> Transparent32 {
    a
}

// CHECK: i32 @make_closure(i32 noundef %x)
// CHECK: i32 @make_closure(i32{{.*}} %x)
#[no_mangle]
pub fn make_closure(x: i32) -> impl Fn(i32) -> i32 {
    // CHECK-NOT: alloca
@ -40,7 +40,7 @@ pub fn make_transparent_pair(x: (u16, u16)) -> TransparentPair {
    a
}

// CHECK-LABEL: { i32, i32 } @make_2_tuple(i32 noundef %x)
// CHECK-LABEL: { i32, i32 } @make_2_tuple(i32{{.*}} %x)
#[no_mangle]
pub fn make_2_tuple(x: u32) -> (u32, u32) {
    // CHECK-NOT: alloca
@ -59,7 +59,7 @@ pub fn make_cell_of_bool(b: bool) -> std::cell::Cell<bool> {
    std::cell::Cell::new(b)
}

// CHECK-LABEL: { i8, i16 } @make_cell_of_bool_and_short(i1 noundef zeroext %b, i16 noundef %s)
// CHECK-LABEL: { i8, i16 } @make_cell_of_bool_and_short(i1 noundef zeroext %b, i16{{.*}} %s)
#[no_mangle]
pub fn make_cell_of_bool_and_short(b: bool, s: u16) -> std::cell::Cell<(bool, u16)> {
    // CHECK-NOT: alloca
@ -92,7 +92,7 @@ pub fn make_struct_0() -> Struct0 {

pub struct Struct1(i32);

// CHECK-LABEL: i32 @make_struct_1(i32 noundef %a)
// CHECK-LABEL: i32 @make_struct_1(i32{{.*}} %a)
#[no_mangle]
pub fn make_struct_1(a: i32) -> Struct1 {
    // CHECK: ret i32 %a
@ -104,7 +104,7 @@ pub struct Struct2Asc(i16, i64);

// bit32-LABEL: void @make_struct_2_asc({{.*}} sret({{[^,]*}}) {{.*}} %s,
// bit64-LABEL: { i64, i16 } @make_struct_2_asc(
// CHECK-SAME: i16 noundef %a, i64 noundef %b)
// CHECK-SAME: i16{{.*}} %a, i64 noundef %b)
#[no_mangle]
pub fn make_struct_2_asc(a: i16, b: i64) -> Struct2Asc {
    // CHECK-NOT: alloca
@ -122,7 +122,7 @@ pub struct Struct2Desc(i64, i16);

// bit32-LABEL: void @make_struct_2_desc({{.*}} sret({{[^,]*}}) {{.*}} %s,
// bit64-LABEL: { i64, i16 } @make_struct_2_desc(
// CHECK-SAME: i64 noundef %a, i16 noundef %b)
// CHECK-SAME: i64 noundef %a, i16{{.*}} %b)
#[no_mangle]
pub fn make_struct_2_desc(a: i64, b: i16) -> Struct2Desc {
    // CHECK-NOT: alloca
@ -24,7 +24,7 @@ pub fn nonzero_int(x: NonZero<u128>) -> NonZero<u128> {
    x
}

// CHECK: noundef range(i8 0, 3) i8 @optional_bool(i8 noundef range(i8 0, 3) %x)
// CHECK: noundef range(i8 0, 3) i8 @optional_bool(i8{{.*}} range(i8 0, 3) %x)
#[no_mangle]
pub fn optional_bool(x: Option<bool>) -> Option<bool> {
    x
@ -36,7 +36,7 @@ pub enum Enum0 {
    C,
}

// CHECK: noundef range(i8 0, 4) i8 @enum0_value(i8 noundef range(i8 0, 4) %x)
// CHECK: noundef range(i8 0, 4) i8 @enum0_value(i8{{.*}} range(i8 0, 4) %x)
#[no_mangle]
pub fn enum0_value(x: Enum0) -> Enum0 {
    x
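The `range(i8 0, 3)` attribute in these patterns reflects the niche layout of `Option<bool>`: it stays a single byte whose only valid values are 0, 1 and 2. A quick check of that layout fact with stable code (general Rust behaviour, not something introduced by this patch):

use std::mem::size_of;

fn main() {
    // `Option<bool>` reuses the invalid bool bit patterns as its niche,
    // so no extra discriminant byte is needed.
    assert_eq!(size_of::<Option<bool>>(), 1);
    assert_eq!(size_of::<bool>(), 1);
}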

tests/codegen/rust-abi-arch-specific-adjustment.rs (new file, 111 lines)
@ -0,0 +1,111 @@
//@ compile-flags: -O -C no-prepopulate-passes
//@ revisions: riscv64 loongarch64

//@[riscv64] only-riscv64
//@[riscv64] compile-flags: --target riscv64gc-unknown-linux-gnu
//@[riscv64] needs-llvm-components: riscv

//@[loongarch64] only-loongarch64
//@[loongarch64] compile-flags: --target loongarch64-unknown-linux-gnu
//@[loongarch64] needs-llvm-components: loongarch

#![crate_type = "lib"]

#[no_mangle]
// riscv64: define noundef i8 @arg_attr_u8(i8 noundef zeroext %x)
// loongarch64: define noundef i8 @arg_attr_u8(i8 noundef zeroext %x)
pub fn arg_attr_u8(x: u8) -> u8 {
    x
}

#[no_mangle]
// riscv64: define noundef i16 @arg_attr_u16(i16 noundef zeroext %x)
// loongarch64: define noundef i16 @arg_attr_u16(i16 noundef zeroext %x)
pub fn arg_attr_u16(x: u16) -> u16 {
    x
}

#[no_mangle]
// riscv64: define noundef i32 @arg_attr_u32(i32 noundef signext %x)
// loongarch64: define noundef i32 @arg_attr_u32(i32 noundef signext %x)
pub fn arg_attr_u32(x: u32) -> u32 {
    x
}

#[no_mangle]
// riscv64: define noundef i64 @arg_attr_u64(i64 noundef %x)
// loongarch64: define noundef i64 @arg_attr_u64(i64 noundef %x)
pub fn arg_attr_u64(x: u64) -> u64 {
    x
}

#[no_mangle]
// riscv64: define noundef i128 @arg_attr_u128(i128 noundef %x)
// loongarch64: define noundef i128 @arg_attr_u128(i128 noundef %x)
pub fn arg_attr_u128(x: u128) -> u128 {
    x
}

#[no_mangle]
// riscv64: define noundef i8 @arg_attr_i8(i8 noundef signext %x)
// loongarch64: define noundef i8 @arg_attr_i8(i8 noundef signext %x)
pub fn arg_attr_i8(x: i8) -> i8 {
    x
}

#[no_mangle]
// riscv64: define noundef i16 @arg_attr_i16(i16 noundef signext %x)
// loongarch64: define noundef i16 @arg_attr_i16(i16 noundef signext %x)
pub fn arg_attr_i16(x: i16) -> i16 {
    x
}

#[no_mangle]
// riscv64: define noundef i32 @arg_attr_i32(i32 noundef signext %x)
// loongarch64: define noundef i32 @arg_attr_i32(i32 noundef signext %x)
pub fn arg_attr_i32(x: i32) -> i32 {
    x
}

#[no_mangle]
// riscv64: define noundef i64 @arg_attr_i64(i64 noundef %x)
// loongarch64: define noundef i64 @arg_attr_i64(i64 noundef %x)
pub fn arg_attr_i64(x: i64) -> i64 {
    x
}

#[no_mangle]
// riscv64: define noundef i128 @arg_attr_i128(i128 noundef %x)
// loongarch64: define noundef i128 @arg_attr_i128(i128 noundef %x)
pub fn arg_attr_i128(x: i128) -> i128 {
    x
}

#[no_mangle]
// riscv64: define noundef zeroext i1 @arg_attr_bool(i1 noundef zeroext %x)
// loongarch64: define noundef zeroext i1 @arg_attr_bool(i1 noundef zeroext %x)
pub fn arg_attr_bool(x: bool) -> bool {
    x
}

#[no_mangle]
// ignore-tidy-linelength
// riscv64: define noundef{{( range\(i32 0, 1114112\))?}} i32 @arg_attr_char(i32 noundef signext{{( range\(i32 0, 1114112\))?}} %x)
// loongarch64: define noundef{{( range\(i32 0, 1114112\))?}} i32 @arg_attr_char(i32 noundef signext{{( range\(i32 0, 1114112\))?}} %x)
pub fn arg_attr_char(x: char) -> char {
    x
}

#[no_mangle]
// riscv64: define noundef float @arg_attr_f32(float noundef %x)
// loongarch64: define noundef float @arg_attr_f32(float noundef %x)
pub fn arg_attr_f32(x: f32) -> f32 {
    x
}

#[no_mangle]
// riscv64: define noundef double @arg_attr_f64(double noundef %x)
// loongarch64: define noundef double @arg_attr_f64(double noundef %x)
pub fn arg_attr_f64(x: f64) -> f64 {
    x
}
@ -12,7 +12,7 @@ pub fn foo(f: fn(i32) -> i32, arg: i32) -> i32 {
    // CHECK: Function Attrs: {{.*}}
    // CHECK-LABEL: define{{.*}}foo{{.*}}!type !{{[0-9]+}} !type !{{[0-9]+}} !type !{{[0-9]+}} !type !{{[0-9]+}}
    // CHECK: start:
    // CHECK-NEXT: {{%.+}} = call i32 %f(i32 %arg)
    // CHECK-NEXT: {{%.+}} = call i32 %f(i32{{.*}} %arg)
    // CHECK-NEXT: ret i32 {{%.+}}
    f(arg)
}
@ -11,7 +11,7 @@ pub fn foo(f: fn(i32) -> i32, arg: i32) -> i32 {
    // CHECK: [[TT:%.+]] = call i1 @llvm.type.test(ptr {{%f|%0}}, metadata !"{{[[:print:]]+}}")
    // CHECK-NEXT: br i1 [[TT]], label %type_test.pass, label %type_test.fail
    // CHECK: type_test.pass:
    // CHECK-NEXT: {{%.+}} = call i32 %f(i32 %arg)
    // CHECK-NEXT: {{%.+}} = call i32 %f(i32{{.*}} %arg)
    // CHECK: type_test.fail:
    // CHECK-NEXT: call void @llvm.trap()
    // CHECK-NEXT: unreachable
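These CFI tests check that the indirect call through `%f` still matches after the argument-attribute change; the `llvm.type.test`/trap pair is what the CFI sanitizer inserts around such calls. A minimal, sanitizer-agnostic sketch of the Rust shape being compiled (illustrative only; the FileCheck patterns above apply only under the test's own flags):

fn double(x: i32) -> i32 {
    x * 2
}

// An indirect call through a function pointer. Under CFI instrumentation the
// call site is guarded by a type test that traps on a signature mismatch.
fn call_indirect(f: fn(i32) -> i32, arg: i32) -> i32 {
    f(arg)
}

fn main() {
    assert_eq!(call_indirect(double, 21), 42);
}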
@ -25,7 +25,7 @@ pub fn bool_to_byte(b: bool) -> u8 {
    unsafe { std::mem::transmute(b) }
}

// CHECK-LABEL: define{{.*}}zeroext i1 @byte_to_bool(i8 %byte)
// CHECK-LABEL: define{{.*}}zeroext i1 @byte_to_bool(i8{{.*}} %byte)
// CHECK: %_0 = trunc i8 %byte to i1
// CHECK-NEXT: ret i1 %_0
#[no_mangle]
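For context on what `bool_to_byte`/`byte_to_bool` pin down: a `bool` has the same size and alignment as `u8` and only the values 0 and 1 are valid, which is why the round-trip below is sound (the reverse direction is only sound for those two byte values):

fn main() {
    let byte: u8 = unsafe { std::mem::transmute::<bool, u8>(true) };
    assert_eq!(byte, 1);

    // Only 0 and 1 are valid `bool` bit patterns; transmuting anything else is UB.
    let flag: bool = unsafe { std::mem::transmute::<u8, bool>(0u8) };
    assert!(!flag);
}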
@ -131,7 +131,7 @@ pub fn test_CUnionU128(_: CUnionU128) {
pub union UnionBool {
    b: bool,
}
// CHECK: define {{(dso_local )?}}noundef zeroext i1 @test_UnionBool(i8 %b)
// CHECK: define {{(dso_local )?}}noundef zeroext i1 @test_UnionBool(i8{{.*}} %b)
#[no_mangle]
pub fn test_UnionBool(b: UnionBool) -> bool {
    unsafe { b.b }
@ -2,7 +2,7 @@

#![crate_type = "lib"]

// CHECK-LABEL: define{{.*}}i32 @test(i32 noundef %a, i32 noundef %b)
// CHECK-LABEL: define{{.*}}i32 @test(i32{{.*}} %a, i32{{.*}} %b)
#[no_mangle]
pub fn test(a: u32, b: u32) -> u32 {
    let c = a + b;

tests/run-make/rust-lld-link-script-provide/main.rs (new file, 7 lines)
@ -0,0 +1,7 @@
#[no_mangle]
fn foo() {}

#[no_mangle]
fn bar() {}

fn main() {}

tests/run-make/rust-lld-link-script-provide/rmake.rs (new file, 18 lines)
@ -0,0 +1,18 @@
// This test ensures that the “symbol not found” error does not occur
// when the symbols in the `PROVIDE` of the link script can be eliminated.
// This is a regression test for #131164.

//@ needs-rust-lld
//@ only-x86_64-unknown-linux-gnu

use run_make_support::rustc;

fn main() {
    rustc()
        .input("main.rs")
        .arg("-Zlinker-features=+lld")
        .arg("-Clink-self-contained=+linker")
        .arg("-Zunstable-options")
        .link_arg("-Tscript.t")
        .run();
}

tests/run-make/rust-lld-link-script-provide/script.t (new file, 1 line)
@ -0,0 +1 @@
PROVIDE(foo = bar);
@ -6,9 +6,9 @@ fn main() {
    macro_rules! one_nested_count_and_len {
        ( $( [ $( $l:literal ),* ] ),* ) => {
            [
                // outer-most repetition
                // outermost repetition
                $(
                    // inner-most repetition
                    // innermost repetition
                    $(
                        ${ignore($l)} ${index()}, ${len()},
                    )*
@ -23,34 +23,34 @@ fn main() {
        [
            // # ["foo"]

            // ## inner-most repetition (first iteration)
            // ## innermost repetition (first iteration)
            //
            // `index` is 0 because this is the first inner-most iteration.
            // `len` is 1 because there is only one inner-most repetition, "foo".
            // `index` is 0 because this is the first innermost iteration.
            // `len` is 1 because there is only one innermost repetition, "foo".
            0, 1,
            // ## outer-most repetition (first iteration)
            // ## outermost repetition (first iteration)
            //
            // `count` is 1 because of "foo", i,e, `$l` has only one repetition,
            // `index` is 0 because this is the first outer-most iteration.
            // `len` is 2 because there are 2 outer-most repetitions, ["foo"] and ["bar", "baz"]
            // `index` is 0 because this is the first outermost iteration.
            // `len` is 2 because there are 2 outermost repetitions, ["foo"] and ["bar", "baz"]
            1, 0, 2,
            // # ["bar", "baz"]

            // ## inner-most repetition (first iteration)
            // ## innermost repetition (first iteration)
            //
            // `index` is 0 because this is the first inner-most iteration
            // `index` is 0 because this is the first innermost iteration
            // `len` is 2 because there are repetitions, "bar" and "baz"
            0, 2,
            // ## inner-most repetition (second iteration)
            // ## innermost repetition (second iteration)
            //
            // `index` is 1 because this is the second inner-most iteration
            // `index` is 1 because this is the second innermost iteration
            // `len` is 2 because there are repetitions, "bar" and "baz"
            1, 2,
            // ## outer-most repetition (second iteration)
            // ## outermost repetition (second iteration)
            //
            // `count` is 2 because of "bar" and "baz", i,e, `$l` has two repetitions,
            // `index` is 1 because this is the second outer-most iteration
            // `len` is 2 because there are 2 outer-most repetitions, ["foo"] and ["bar", "baz"]
            // `index` is 1 because this is the second outermost iteration
            // `len` is 2 because there are 2 outermost repetitions, ["foo"] and ["bar", "baz"]
            2, 1, 2,
            // # last count

@ -61,7 +61,7 @@ fn main() {

    // Based on the above explanation, the following macros should be straightforward

    // Grouped from the outer-most to the inner-most
    // Grouped from the outermost to the innermost
    macro_rules! three_nested_count {
        ( $( { $( [ $( ( $( $i:ident )* ) )* ] )* } )* ) => {
            &[
@ -156,7 +156,7 @@ fn main() {
        ][..]
    );

    // Grouped from the outer-most to the inner-most
    // Grouped from the outermost to the innermost
    macro_rules! three_nested_len {
        ( $( { $( [ $( ( $( $i:ident )* ) )* ] )* } )* ) => {
            &[
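These wording changes touch the tests for the nightly-only `macro_metavar_expr` feature. For readers who have not seen it, a minimal sketch of `${index()}`/`${len()}` in a single, non-nested repetition (my own example, not from the test suite; requires a nightly compiler with the feature enabled):

#![feature(macro_metavar_expr)]

macro_rules! enumerate {
    ( $( $name:ident ),* ) => {
        // For each repetition, ${index()} is its position and ${len()} is the
        // total number of repetitions at this (innermost) depth.
        [ $( (${index()}, ${len()}, stringify!($name)) ),* ]
    };
}

fn main() {
    assert_eq!(
        enumerate!(a, b, c),
        [(0, 3, "a"), (1, 3, "b"), (2, 3, "c")]
    );
}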
@ -10,12 +10,12 @@ macro_rules! curly__no_rhs_dollar__round {

macro_rules! curly__no_rhs_dollar__no_round {
    ( $i:ident ) => { ${ count($i) } };
    //~^ ERROR `count` can not be placed inside the inner-most repetition
    //~^ ERROR `count` can not be placed inside the innermost repetition
}

macro_rules! curly__rhs_dollar__no_round {
    ( $i:ident ) => { ${ count($i) } };
    //~^ ERROR `count` can not be placed inside the inner-most repetition
    //~^ ERROR `count` can not be placed inside the innermost repetition
}

#[rustfmt::skip] // autoformatters can break a few of the error traces
@ -196,13 +196,13 @@ error: expected identifier or string literal
LL | ( $( $i:ident ),* ) => { ${ {} } };
   |                             ^^

error: `count` can not be placed inside the inner-most repetition
error: `count` can not be placed inside the innermost repetition
  --> $DIR/syntax-errors.rs:12:24
   |
LL | ( $i:ident ) => { ${ count($i) } };
   |                       ^^^^^^^^^^^^^

error: `count` can not be placed inside the inner-most repetition
error: `count` can not be placed inside the innermost repetition
  --> $DIR/syntax-errors.rs:17:24
   |
LL | ( $i:ident ) => { ${ count($i) } };