Auto merge of #133339 - jieyouxu:rollup-gav0nvr, r=jieyouxu

Rollup of 8 pull requests

Successful merges:

 - #133238 (re-export `is_loongarch_feature_detected`)
 - #133288 (Support `each_ref` and `each_mut` in `[T; N]` in constant expressions.)
 - #133311 (Miri subtree update)
 - #133313 (Use arc4random of libc for RTEMS target)
 - #133319 (Simplify `fulfill_implication`)
 - #133323 (Bail in effects in old solver if self ty is ty var)
 - #133330 (library: update comment around close())
 - #133337 (Fix typo in `std::thread::Scope::spawn` documentation.)

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2024-11-22 16:27:07 +00:00
commit a47555110c
59 changed files with 1701 additions and 921 deletions


@ -70,6 +70,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::infer::outlives::env::OutlivesEnvironment;
use rustc_infer::traits::ObligationCause;
use rustc_infer::traits::specialization_graph::Node;
use rustc_middle::ty::trait_def::TraitSpecializationKind;
use rustc_middle::ty::{
@ -210,13 +211,7 @@ fn get_impl_args(
impl1_def_id.to_def_id(),
impl1_args,
impl2_node,
|_, span| {
traits::ObligationCause::new(
impl1_span,
impl1_def_id,
traits::ObligationCauseCode::WhereClause(impl2_node.def_id(), span),
)
},
&ObligationCause::misc(impl1_span, impl1_def_id),
);
let errors = ocx.select_all_or_error();


@ -27,6 +27,11 @@ pub fn evaluate_host_effect_obligation<'tcx>(
);
}
// Force ambiguity for infer self ty.
if obligation.predicate.self_ty().is_ty_var() {
return Err(EvaluationFailure::Ambiguous);
}
match evaluate_host_effect_from_bounds(selcx, obligation) {
Ok(result) => return Ok(result),
Err(EvaluationFailure::Ambiguous) => return Err(EvaluationFailure::Ambiguous),


@ -15,24 +15,24 @@ use rustc_data_structures::fx::FxIndexSet;
use rustc_errors::codes::*;
use rustc_errors::{Diag, EmissionGuarantee};
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_infer::infer::DefineOpaqueTypes;
use rustc_middle::bug;
use rustc_middle::query::LocalCrate;
use rustc_middle::ty::print::PrintTraitRefExt as _;
use rustc_middle::ty::{
self, GenericArgsRef, ImplSubject, Ty, TyCtxt, TypeVisitableExt, TypingMode,
};
use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt, TypingMode};
use rustc_session::lint::builtin::{COHERENCE_LEAK_CHECK, ORDER_DEPENDENT_TRAIT_OBJECTS};
use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, sym};
use rustc_type_ir::solve::NoSolution;
use specialization_graph::GraphExt;
use tracing::{debug, instrument};
use super::{SelectionContext, util};
use crate::error_reporting::traits::to_pretty_impl_header;
use crate::errors::NegativePositiveConflict;
use crate::infer::{InferCtxt, InferOk, TyCtxtInferExt};
use crate::infer::{InferCtxt, TyCtxtInferExt};
use crate::traits::select::IntercrateAmbiguityCause;
use crate::traits::{FutureCompatOverlapErrorKind, ObligationCause, ObligationCtxt, coherence};
use crate::traits::{
FutureCompatOverlapErrorKind, ObligationCause, ObligationCtxt, coherence,
predicates_for_generics,
};
/// Information pertinent to an overlapping impl error.
#[derive(Debug)]
@ -87,9 +87,14 @@ pub fn translate_args<'tcx>(
source_args: GenericArgsRef<'tcx>,
target_node: specialization_graph::Node,
) -> GenericArgsRef<'tcx> {
translate_args_with_cause(infcx, param_env, source_impl, source_args, target_node, |_, _| {
ObligationCause::dummy()
})
translate_args_with_cause(
infcx,
param_env,
source_impl,
source_args,
target_node,
&ObligationCause::dummy(),
)
}
/// Like [translate_args], but obligations from the parent implementation
@ -104,7 +109,7 @@ pub fn translate_args_with_cause<'tcx>(
source_impl: DefId,
source_args: GenericArgsRef<'tcx>,
target_node: specialization_graph::Node,
cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
cause: &ObligationCause<'tcx>,
) -> GenericArgsRef<'tcx> {
debug!(
"translate_args({:?}, {:?}, {:?}, {:?})",
@ -123,7 +128,7 @@ pub fn translate_args_with_cause<'tcx>(
}
fulfill_implication(infcx, param_env, source_trait_ref, source_impl, target_impl, cause)
.unwrap_or_else(|()| {
.unwrap_or_else(|_| {
bug!(
"When translating generic parameters from {source_impl:?} to \
{target_impl:?}, the expected specialization failed to hold"
@ -137,6 +142,84 @@ pub fn translate_args_with_cause<'tcx>(
source_args.rebase_onto(infcx.tcx, source_impl, target_args)
}
/// Attempt to fulfill all obligations of `target_impl` after unification with
/// `source_trait_ref`. If successful, returns the generic parameters for *all* the
/// generics of `target_impl`, including both those needed to unify with
/// `source_trait_ref` and those whose identity is determined via a where
/// clause in the impl.
fn fulfill_implication<'tcx>(
infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_trait_ref: ty::TraitRef<'tcx>,
source_impl: DefId,
target_impl: DefId,
cause: &ObligationCause<'tcx>,
) -> Result<GenericArgsRef<'tcx>, NoSolution> {
debug!(
"fulfill_implication({:?}, trait_ref={:?} |- {:?} applies)",
param_env, source_trait_ref, target_impl
);
let ocx = ObligationCtxt::new(infcx);
let source_trait_ref = ocx.normalize(cause, param_env, source_trait_ref);
if !ocx.select_all_or_error().is_empty() {
infcx.dcx().span_delayed_bug(
infcx.tcx.def_span(source_impl),
format!("failed to fully normalize {source_trait_ref}"),
);
return Err(NoSolution);
}
let target_args = infcx.fresh_args_for_item(DUMMY_SP, target_impl);
let target_trait_ref = ocx.normalize(
cause,
param_env,
infcx
.tcx
.impl_trait_ref(target_impl)
.expect("expected source impl to be a trait impl")
.instantiate(infcx.tcx, target_args),
);
// do the impls unify? If not, no specialization.
ocx.eq(cause, param_env, source_trait_ref, target_trait_ref)?;
// Now check that the source trait ref satisfies all the where clauses of the target impl.
// This is not just for correctness; we also need this to constrain any params that may
// only be referenced via projection predicates.
let predicates = ocx.normalize(
cause,
param_env,
infcx.tcx.predicates_of(target_impl).instantiate(infcx.tcx, target_args),
);
let obligations = predicates_for_generics(|_, _| cause.clone(), param_env, predicates);
ocx.register_obligations(obligations);
let errors = ocx.select_all_or_error();
if !errors.is_empty() {
// no dice!
debug!(
"fulfill_implication: for impls on {:?} and {:?}, \
could not fulfill: {:?} given {:?}",
source_trait_ref,
target_trait_ref,
errors,
param_env.caller_bounds()
);
return Err(NoSolution);
}
debug!(
"fulfill_implication: an impl for {:?} specializes {:?}",
source_trait_ref, target_trait_ref
);
// Now resolve the *generic parameters* we built for the target earlier, replacing
// the inference variables inside with whatever we got from fulfillment.
Ok(infcx.resolve_vars_if_possible(target_args))
}
pub(super) fn specialization_enabled_in(tcx: TyCtxt<'_>, _: LocalCrate) -> bool {
tcx.features().specialization() || tcx.features().min_specialization()
}
@ -182,8 +265,9 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId,
return false;
}
// create a parameter environment corresponding to a (placeholder) instantiation of impl1
let penv = tcx.param_env(impl1_def_id);
// create a parameter environment corresponding to an identity instantiation of impl1,
// i.e. the most generic instantiation of impl1.
let param_env = tcx.param_env(impl1_def_id);
// Create an infcx, taking the predicates of impl1 as assumptions:
let infcx = tcx.infer_ctxt().build(TypingMode::non_body_analysis());
@ -191,90 +275,15 @@ pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId,
// Attempt to prove that impl2 applies, given all of the above.
fulfill_implication(
&infcx,
penv,
param_env,
impl1_trait_header.trait_ref.instantiate_identity(),
impl1_def_id,
impl2_def_id,
|_, _| ObligationCause::dummy(),
&ObligationCause::dummy(),
)
.is_ok()
}
/// Attempt to fulfill all obligations of `target_impl` after unification with
/// `source_trait_ref`. If successful, returns the generic parameters for *all* the
/// generics of `target_impl`, including both those needed to unify with
/// `source_trait_ref` and those whose identity is determined via a where
/// clause in the impl.
fn fulfill_implication<'tcx>(
infcx: &InferCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
source_trait_ref: ty::TraitRef<'tcx>,
source_impl: DefId,
target_impl: DefId,
error_cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
) -> Result<GenericArgsRef<'tcx>, ()> {
debug!(
"fulfill_implication({:?}, trait_ref={:?} |- {:?} applies)",
param_env, source_trait_ref, target_impl
);
let ocx = ObligationCtxt::new(infcx);
let source_trait_ref = ocx.normalize(&ObligationCause::dummy(), param_env, source_trait_ref);
if !ocx.select_all_or_error().is_empty() {
infcx.dcx().span_delayed_bug(
infcx.tcx.def_span(source_impl),
format!("failed to fully normalize {source_trait_ref}"),
);
}
let source_trait_ref = infcx.resolve_vars_if_possible(source_trait_ref);
let source_trait = ImplSubject::Trait(source_trait_ref);
let selcx = SelectionContext::new(infcx);
let target_args = infcx.fresh_args_for_item(DUMMY_SP, target_impl);
let (target_trait, obligations) =
util::impl_subject_and_oblig(&selcx, param_env, target_impl, target_args, error_cause);
// do the impls unify? If not, no specialization.
let Ok(InferOk { obligations: more_obligations, .. }) = infcx
.at(&ObligationCause::dummy(), param_env)
// Ok to use `Yes`, as all the generic params are already replaced by inference variables,
// which will match the opaque type no matter if it is defining or not.
// Any concrete type that would match the opaque would already be handled by coherence rules,
// and thus either be ok to match here and already have errored, or it won't match, in which
// case there is no issue anyway.
.eq(DefineOpaqueTypes::Yes, source_trait, target_trait)
else {
debug!("fulfill_implication: {:?} does not unify with {:?}", source_trait, target_trait);
return Err(());
};
// attempt to prove all of the predicates for impl2 given those for impl1
// (which are packed up in penv)
ocx.register_obligations(obligations.chain(more_obligations));
let errors = ocx.select_all_or_error();
if !errors.is_empty() {
// no dice!
debug!(
"fulfill_implication: for impls on {:?} and {:?}, \
could not fulfill: {:?} given {:?}",
source_trait,
target_trait,
errors,
param_env.caller_bounds()
);
return Err(());
}
debug!("fulfill_implication: an impl for {:?} specializes {:?}", source_trait, target_trait);
// Now resolve the *generic parameters* we built for the target earlier, replacing
// the inference variables inside with whatever we got from fulfillment.
Ok(infcx.resolve_vars_if_possible(target_args))
}
/// Query provider for `specialization_graph_of`.
pub(super) fn specialization_graph_provider(
tcx: TyCtxt<'_>,
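For intuition, a minimal (hypothetical) impl pair of the kind `specializes` and `fulfill_implication` reason about; this is an editorial sketch under the unstable `specialization` feature, not code from this commit:

#![feature(specialization)]

trait Greet {
    fn greet(&self) -> &'static str;
}

// The general impl: applies to every `T`.
impl<T> Greet for T {
    default fn greet(&self) -> &'static str { "hi" }
}

// The specializing impl: it specializes the blanket impl when, under its
// own ParamEnv, the blanket impl's trait ref unifies and all of its
// where-clauses can be fulfilled; that is the check `fulfill_implication`
// performs above.
impl Greet for String {
    fn greet(&self) -> &'static str { "hello" }
}

fn main() {
    assert_eq!(().greet(), "hi");
    assert_eq!(String::from("x").greet(), "hello");
}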


@ -3,19 +3,16 @@ use std::collections::BTreeMap;
use rustc_data_structures::fx::FxIndexMap;
use rustc_errors::Diag;
use rustc_hir::def_id::DefId;
use rustc_infer::infer::{InferCtxt, InferOk};
use rustc_infer::infer::InferCtxt;
pub use rustc_infer::traits::util::*;
use rustc_middle::bug;
use rustc_middle::ty::{
self, GenericArgsRef, ImplSubject, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitableExt, Upcast,
self, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitableExt, Upcast,
};
use rustc_span::Span;
use smallvec::{SmallVec, smallvec};
use tracing::debug;
use super::{NormalizeExt, ObligationCause, PredicateObligation, SelectionContext};
///////////////////////////////////////////////////////////////////////////
// `TraitAliasExpander` iterator
///////////////////////////////////////////////////////////////////////////
@ -166,34 +163,6 @@ impl<'tcx> Iterator for TraitAliasExpander<'tcx> {
// Other
///////////////////////////////////////////////////////////////////////////
/// Instantiate all bound parameters of the impl subject with the given args,
/// returning the resulting subject and all obligations that arise.
/// The obligations are closed under normalization.
pub(crate) fn impl_subject_and_oblig<'a, 'tcx>(
selcx: &SelectionContext<'a, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
impl_def_id: DefId,
impl_args: GenericArgsRef<'tcx>,
cause: impl Fn(usize, Span) -> ObligationCause<'tcx>,
) -> (ImplSubject<'tcx>, impl Iterator<Item = PredicateObligation<'tcx>>) {
let subject = selcx.tcx().impl_subject(impl_def_id);
let subject = subject.instantiate(selcx.tcx(), impl_args);
let InferOk { value: subject, obligations: normalization_obligations1 } =
selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(subject);
let predicates = selcx.tcx().predicates_of(impl_def_id);
let predicates = predicates.instantiate(selcx.tcx(), impl_args);
let InferOk { value: predicates, obligations: normalization_obligations2 } =
selcx.infcx.at(&ObligationCause::dummy(), param_env).normalize(predicates);
let impl_obligations = super::predicates_for_generics(cause, param_env, predicates);
let impl_obligations =
impl_obligations.chain(normalization_obligations1).chain(normalization_obligations2);
(subject, impl_obligations)
}
/// Casts a trait reference into a reference to one of its super
/// traits; returns `None` if `target_trait_def_id` is not a
/// supertrait.


@ -10,11 +10,13 @@ use crate::convert::Infallible;
use crate::error::Error;
use crate::fmt;
use crate::hash::{self, Hash};
use crate::intrinsics::transmute_unchecked;
use crate::iter::{UncheckedIterator, repeat_n};
use crate::mem::{self, MaybeUninit};
use crate::ops::{
ChangeOutputType, ControlFlow, FromResidual, Index, IndexMut, NeverShortCircuit, Residual, Try,
};
use crate::ptr::{null, null_mut};
use crate::slice::{Iter, IterMut};
mod ascii;
@ -606,8 +608,20 @@ impl<T, const N: usize> [T; N] {
/// assert_eq!(strings.len(), 3);
/// ```
#[stable(feature = "array_methods", since = "1.77.0")]
pub fn each_ref(&self) -> [&T; N] {
from_trusted_iterator(self.iter())
#[rustc_const_unstable(feature = "const_array_each_ref", issue = "133289")]
pub const fn each_ref(&self) -> [&T; N] {
let mut buf = [null::<T>(); N];
// FIXME(const-hack): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions.
let mut i = 0;
while i < N {
buf[i] = &raw const self[i];
i += 1;
}
// SAFETY: `*const T` has the same layout as `&T`, and we've also initialised each pointer as a valid reference.
unsafe { transmute_unchecked(buf) }
}
/// Borrows each element mutably and returns an array of mutable references
@ -625,8 +639,20 @@ impl<T, const N: usize> [T; N] {
/// assert_eq!(floats, [0.0, 2.7, -1.0]);
/// ```
#[stable(feature = "array_methods", since = "1.77.0")]
pub fn each_mut(&mut self) -> [&mut T; N] {
from_trusted_iterator(self.iter_mut())
#[rustc_const_unstable(feature = "const_array_each_ref", issue = "133289")]
pub const fn each_mut(&mut self) -> [&mut T; N] {
let mut buf = [null_mut::<T>(); N];
// FIXME(const-hack): We would like to simply use iterators for this (as in the original implementation), but this is not allowed in constant expressions.
let mut i = 0;
while i < N {
buf[i] = &raw mut self[i];
i += 1;
}
// SAFETY: `*mut T` has the same layout as `&mut T`, and we've also initialised each pointer as a valid reference.
unsafe { transmute_unchecked(buf) }
}
/// Divides one array reference into two at an index.
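A usage sketch for the newly-const methods (editorial; assumes a nightly toolchain with the `const_array_each_ref` feature tracked in #133289, and the hypothetical helper name `refs_of`):

#![feature(const_array_each_ref)]

// `each_ref` can now be evaluated at compile time.
const fn refs_of(arr: &[u8; 4]) -> [&u8; 4] {
    arr.each_ref()
}

const DATA: [u8; 4] = [1, 2, 3, 4];
const FIRST: u8 = *refs_of(&DATA)[0];

fn main() {
    assert_eq!(FIRST, 1);
}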


@ -658,6 +658,8 @@ pub mod arch {
pub use std_detect::is_aarch64_feature_detected;
#[unstable(feature = "stdarch_arm_feature_detection", issue = "111190")]
pub use std_detect::is_arm_feature_detected;
#[unstable(feature = "is_loongarch_feature_detected", issue = "117425")]
pub use std_detect::is_loongarch_feature_detected;
#[unstable(feature = "is_riscv_feature_detected", issue = "111192")]
pub use std_detect::is_riscv_feature_detected;
#[stable(feature = "simd_x86", since = "1.27.0")]


@ -173,16 +173,17 @@ impl Drop for OwnedFd {
#[inline]
fn drop(&mut self) {
unsafe {
// Note that errors are ignored when closing a file descriptor. The
// reason for this is that if an error occurs we don't actually know if
// the file descriptor was closed or not, and if we retried (for
// something like EINTR), we might close another valid file descriptor
// opened after we closed ours.
// However, this is usually justified, as some of the major Unices
// do make sure to always close the FD, even when `close()` is interrupted,
// and the scenario is rare to begin with.
// Helpful link to an epic discussion by POSIX workgroup:
// http://austingroupbugs.net/view.php?id=529
// Note that errors are ignored when closing a file descriptor. According to POSIX 2024,
// we can and indeed should retry `close` on `EINTR`
// (https://pubs.opengroup.org/onlinepubs/9799919799.2024edition/functions/close.html),
// but it is not clear yet how well widely-used implementations are conforming with this
// mandate since older versions of POSIX left the state of the FD after an `EINTR`
// unspecified. Ignoring errors is "fine" because some of the major Unices (in
// particular, Linux) do make sure to always close the FD, even when `close()` is
// interrupted, and the scenario is rare to begin with. If we retried on a
// not-POSIX-compliant implementation, the consequences could be really bad since we may
// close the wrong FD. Helpful link to an epic discussion by POSIX workgroup that led to
// the latest POSIX wording: http://austingroupbugs.net/view.php?id=529
#[cfg(not(target_os = "hermit"))]
{
#[cfg(unix)]
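A standalone illustration of the pattern this comment defends (an editorial sketch assuming the `libc` crate, not the actual std internals):

use std::os::fd::RawFd;

// Close exactly once and ignore the result, including EINTR: on platforms
// that release the descriptor even when `close()` is interrupted, retrying
// could close an unrelated FD that was re-opened under the same number.
fn close_ignoring_errors(fd: RawFd) {
    unsafe {
        let _ = libc::close(fd);
    }
}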


@ -12,7 +12,6 @@
#[cfg(not(any(
target_os = "haiku",
target_os = "illumos",
target_os = "rtems",
target_os = "solaris",
target_os = "vita",
)))]
@ -22,7 +21,6 @@ use libc::arc4random_buf;
#[cfg(any(
target_os = "haiku", // See https://git.haiku-os.org/haiku/tree/headers/compatibility/bsd/stdlib.h
target_os = "illumos", // See https://www.illumos.org/man/3C/arc4random
target_os = "rtems", // See https://docs.rtems.org/branches/master/bsp-howto/getentropy.html
target_os = "solaris", // See https://docs.oracle.com/cd/E88353_01/html/E37843/arc4random-3c.html
target_os = "vita", // See https://github.com/vitasdk/newlib/blob/b89e5bc183b516945f9ee07eef483ecb916e45ff/newlib/libc/include/stdlib.h#L74
))]


@ -176,7 +176,7 @@ impl<'scope, 'env> Scope<'scope, 'env> {
/// thread. If the spawned thread panics, [`join`] will return an [`Err`] containing
/// the panic payload.
///
/// If the join handle is dropped, the spawned thread will implicitly joined at the
/// If the join handle is dropped, the spawned thread will be implicitly joined at the
/// end of the scope. In that case, if the spawned thread panics, [`scope`] will
/// panic after all threads are joined.
///


@ -1,9 +0,0 @@
[unstable]
profile-rustflags = true
# Add back the containing directory of the packages we have to refer to using --manifest-path.
# Per-package profiles avoid adding this to build dependencies.
[profile.dev.package."cargo-miri"]
rustflags = ["--remap-path-prefix", "=cargo-miri"]
[profile.dev.package."miri-script"]
rustflags = ["--remap-path-prefix", "=miri-script"]


@ -548,6 +548,7 @@ Definite bugs found:
* [Incorrect offset computation for highly-aligned types in `portable-atomic-util`](https://github.com/taiki-e/portable-atomic/pull/138)
* [Occasional memory leak in `std::mpsc` channels](https://github.com/rust-lang/rust/issues/121582) (original code in [crossbeam](https://github.com/crossbeam-rs/crossbeam/pull/1084))
* [Weak-memory-induced memory leak in Windows thread-local storage](https://github.com/rust-lang/rust/pull/124281)
* [A bug in the new `RwLock::downgrade` implementation](https://rust-lang.zulipchat.com/#narrow/channel/269128-miri/topic/Miri.20error.20library.20test) (caught by Miri before it landed in the Rust repo)
Violations of [Stacked Borrows] found that are likely bugs (but Stacked Borrows is currently just an experiment):


@ -1,13 +1,15 @@
#!/usr/bin/env bash
set -e
# We want to call the binary directly, so we need to know where it ends up.
MIRI_SCRIPT_TARGET_DIR="$(dirname "$0")"/miri-script/target
ROOT_DIR="$(dirname "$0")"
MIRI_SCRIPT_TARGET_DIR="$ROOT_DIR"/miri-script/target
# If stdout is not a terminal and we are not on CI, assume that we are being invoked by RA, and use JSON output.
if ! [ -t 1 ] && [ -z "$CI" ]; then
MESSAGE_FORMAT="--message-format=json"
fi
# We need a nightly toolchain, for the `profile-rustflags` cargo feature.
cargo +nightly build $CARGO_EXTRA_FLAGS --manifest-path "$(dirname "$0")"/miri-script/Cargo.toml \
# We need a nightly toolchain, for `-Zroot-dir`.
cargo +nightly build $CARGO_EXTRA_FLAGS --manifest-path "$ROOT_DIR"/miri-script/Cargo.toml \
-Zroot-dir="$ROOT_DIR" \
-q --target-dir "$MIRI_SCRIPT_TARGET_DIR" $MESSAGE_FORMAT || \
( echo "Failed to build miri-script. Is the 'nightly' toolchain installed?"; exit 1 )
# Instead of doing just `cargo run --manifest-path .. $@`, we invoke miri-script binary directly. Invoking `cargo run` goes through


@ -105,7 +105,7 @@ impl MiriEnv {
// Get extra flags for cargo.
let cargo_extra_flags = std::env::var("CARGO_EXTRA_FLAGS").unwrap_or_default();
let cargo_extra_flags = flagsplit(&cargo_extra_flags);
let mut cargo_extra_flags = flagsplit(&cargo_extra_flags);
if cargo_extra_flags.iter().any(|a| a == "--release" || a.starts_with("--profile")) {
// This makes binaries end up in different paths, let's not do that.
eprintln!(
@ -113,6 +113,8 @@ impl MiriEnv {
);
std::process::exit(1);
}
// Also set `-Zroot-dir` for cargo, to print diagnostics relative to the miri dir.
cargo_extra_flags.push(format!("-Zroot-dir={}", miri_dir.display()));
Ok(MiriEnv { miri_dir, toolchain, sh, sysroot, cargo_extra_flags, libdir })
}


@ -1 +1 @@
668959740f97e7a22ae340742886d330ab63950f
2d0ea7956c45de6e421fd579e2ded27be405dec6


@ -1,6 +1,7 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::collections::hash_map::Entry;
use std::default::Default;
use std::ops::Not;
use std::rc::Rc;
use std::time::Duration;
@ -46,8 +47,6 @@ macro_rules! declare_id {
}
pub(super) use declare_id;
declare_id!(MutexId);
/// The mutex state.
#[derive(Default, Debug)]
struct Mutex {
@ -61,6 +60,21 @@ struct Mutex {
clock: VClock,
}
#[derive(Default, Clone, Debug)]
pub struct MutexRef(Rc<RefCell<Mutex>>);
impl MutexRef {
fn new() -> Self {
MutexRef(Rc::new(RefCell::new(Mutex::default())))
}
}
impl VisitProvenance for MutexRef {
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {
// Mutex contains no provenance.
}
}
declare_id!(RwLockId);
/// The read-write lock state.
@ -144,7 +158,6 @@ struct FutexWaiter {
/// The state of all synchronization objects.
#[derive(Default, Debug)]
pub struct SynchronizationObjects {
mutexes: IndexVec<MutexId, Mutex>,
rwlocks: IndexVec<RwLockId, RwLock>,
condvars: IndexVec<CondvarId, Condvar>,
pub(super) init_onces: IndexVec<InitOnceId, InitOnce>,
@ -155,17 +168,17 @@ impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn condvar_reacquire_mutex(
&mut self,
mutex: MutexId,
mutex_ref: &MutexRef,
retval: Scalar,
dest: MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if this.mutex_is_locked(mutex) {
assert_ne!(this.mutex_get_owner(mutex), this.active_thread());
this.mutex_enqueue_and_block(mutex, Some((retval, dest)));
if this.mutex_is_locked(mutex_ref) {
assert_ne!(this.mutex_get_owner(mutex_ref), this.active_thread());
this.mutex_enqueue_and_block(mutex_ref, Some((retval, dest)));
} else {
// We can have it right now!
this.mutex_lock(mutex);
this.mutex_lock(mutex_ref);
// Don't forget to write the return value.
this.write_scalar(retval, &dest)?;
}
@ -174,10 +187,9 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
impl SynchronizationObjects {
pub fn mutex_create(&mut self) -> MutexId {
self.mutexes.push(Default::default())
pub fn mutex_create(&mut self) -> MutexRef {
MutexRef::new()
}
pub fn rwlock_create(&mut self) -> RwLockId {
self.rwlocks.push(Default::default())
}
@ -209,12 +221,16 @@ impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// Helper for lazily initialized `alloc_extra.sync` data:
/// this forces an immediate init.
fn lazy_sync_init<T: 'static + Copy>(
&mut self,
/// Return a reference to the data in the machine state.
fn lazy_sync_init<'a, T: 'static>(
&'a mut self,
primitive: &MPlaceTy<'tcx>,
init_offset: Size,
data: T,
) -> InterpResult<'tcx> {
) -> InterpResult<'tcx, &'a T>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
@ -227,7 +243,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
&init_field,
AtomicWriteOrd::Relaxed,
)?;
interp_ok(())
interp_ok(this.get_alloc_extra(alloc)?.get_sync::<T>(offset).unwrap())
}
/// Helper for lazily initialized `alloc_extra.sync` data:
@ -235,13 +251,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
/// and stores that in `alloc_extra.sync`.
/// - Otherwise, calls `new_data` to initialize the primitive.
fn lazy_sync_get_data<T: 'static + Copy>(
&mut self,
///
/// Return a reference to the data in the machine state.
fn lazy_sync_get_data<'a, T: 'static>(
&'a mut self,
primitive: &MPlaceTy<'tcx>,
init_offset: Size,
missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
) -> InterpResult<'tcx, T> {
) -> InterpResult<'tcx, &'a T>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
// Check if this is already initialized. Needs to be atomic because we can race with another
@ -265,17 +286,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// or else it has been moved illegally.
let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
if let Some(data) = alloc_extra.get_sync::<T>(offset) {
interp_ok(*data)
} else {
// Due to borrow checker reasons, we have to do the lookup twice.
if alloc_extra.get_sync::<T>(offset).is_none() {
let data = missing_data()?;
alloc_extra.sync.insert(offset, Box::new(data));
interp_ok(data)
}
interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
} else {
let data = new_data(this)?;
this.lazy_sync_init(primitive, init_offset, data)?;
interp_ok(data)
this.lazy_sync_init(primitive, init_offset, data)
}
}
@ -311,23 +330,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
#[inline]
/// Get the id of the thread that currently owns this lock.
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
let this = self.eval_context_ref();
this.machine.sync.mutexes[id].owner.unwrap()
fn mutex_get_owner(&self, mutex_ref: &MutexRef) -> ThreadId {
mutex_ref.0.borrow().owner.unwrap()
}
#[inline]
/// Check if locked.
fn mutex_is_locked(&self, id: MutexId) -> bool {
let this = self.eval_context_ref();
this.machine.sync.mutexes[id].owner.is_some()
fn mutex_is_locked(&self, mutex_ref: &MutexRef) -> bool {
mutex_ref.0.borrow().owner.is_some()
}
/// Lock by setting the mutex owner and increasing the lock count.
fn mutex_lock(&mut self, id: MutexId) {
fn mutex_lock(&mut self, mutex_ref: &MutexRef) {
let this = self.eval_context_mut();
let thread = this.active_thread();
let mutex = &mut this.machine.sync.mutexes[id];
let mut mutex = mutex_ref.0.borrow_mut();
if let Some(current_owner) = mutex.owner {
assert_eq!(thread, current_owner, "mutex already locked by another thread");
assert!(
@ -347,9 +364,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// count. If the lock count reaches 0, release the lock and potentially
/// give to a new owner. If the lock was not locked by the current thread,
/// return `None`.
fn mutex_unlock(&mut self, id: MutexId) -> InterpResult<'tcx, Option<usize>> {
fn mutex_unlock(&mut self, mutex_ref: &MutexRef) -> InterpResult<'tcx, Option<usize>> {
let this = self.eval_context_mut();
let mutex = &mut this.machine.sync.mutexes[id];
let mut mutex = mutex_ref.0.borrow_mut();
interp_ok(if let Some(current_owner) = mutex.owner {
// Mutex is locked.
if current_owner != this.machine.threads.active_thread() {
@ -367,8 +384,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
mutex.clock.clone_from(clock)
});
}
if let Some(thread) = this.machine.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread, BlockReason::Mutex(id))?;
let thread_id = mutex.queue.pop_front();
// We need to drop our mutex borrow before unblock_thread
// because it will be borrowed again in the unblock callback.
drop(mutex);
if thread_id.is_some() {
this.unblock_thread(thread_id.unwrap(), BlockReason::Mutex)?;
}
}
Some(old_lock_count)
@ -385,24 +406,25 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
#[inline]
fn mutex_enqueue_and_block(
&mut self,
id: MutexId,
mutex_ref: &MutexRef,
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
) {
let this = self.eval_context_mut();
assert!(this.mutex_is_locked(id), "queing on unlocked mutex");
assert!(this.mutex_is_locked(mutex_ref), "queuing on unlocked mutex");
let thread = this.active_thread();
this.machine.sync.mutexes[id].queue.push_back(thread);
mutex_ref.0.borrow_mut().queue.push_back(thread);
let mutex_ref = mutex_ref.clone();
this.block_thread(
BlockReason::Mutex(id),
BlockReason::Mutex,
None,
callback!(
@capture<'tcx> {
id: MutexId,
mutex_ref: MutexRef,
retval_dest: Option<(Scalar, MPlaceTy<'tcx>)>,
}
@unblock = |this| {
assert!(!this.mutex_is_locked(id));
this.mutex_lock(id);
assert!(!this.mutex_is_locked(&mutex_ref));
this.mutex_lock(&mutex_ref);
if let Some((retval, dest)) = retval_dest {
this.write_scalar(retval, &dest)?;
@ -623,14 +645,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn condvar_wait(
&mut self,
condvar: CondvarId,
mutex: MutexId,
mutex_ref: MutexRef,
timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
retval_succ: Scalar,
retval_timeout: Scalar,
dest: MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if let Some(old_locked_count) = this.mutex_unlock(mutex)? {
if let Some(old_locked_count) = this.mutex_unlock(&mutex_ref)? {
if old_locked_count != 1 {
throw_unsup_format!(
"awaiting a condvar on a mutex acquired multiple times is not supported"
@ -650,7 +672,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
callback!(
@capture<'tcx> {
condvar: CondvarId,
mutex: MutexId,
mutex_ref: MutexRef,
retval_succ: Scalar,
retval_timeout: Scalar,
dest: MPlaceTy<'tcx>,
@ -665,7 +687,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
// Try to acquire the mutex.
// The timeout only applies to the first wait (until the signal), not for mutex acquisition.
this.condvar_reacquire_mutex(mutex, retval_succ, dest)
this.condvar_reacquire_mutex(&mutex_ref, retval_succ, dest)
}
@timeout = |this| {
// We have to remove the waiter from the queue again.
@ -673,7 +695,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let waiters = &mut this.machine.sync.condvars[condvar].waiters;
waiters.retain(|waiter| *waiter != thread);
// Now get back the lock.
this.condvar_reacquire_mutex(mutex, retval_timeout, dest)
this.condvar_reacquire_mutex(&mutex_ref, retval_timeout, dest)
}
),
);
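The drop-before-unblock ordering in `mutex_unlock` above exists because `RefCell` borrows are checked at runtime; a standalone sketch (plain Rust, not Miri code) of the failure mode it avoids:

use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(0);
    let guard = cell.borrow_mut();
    // Calling `cell.borrow_mut()` again here would panic: already borrowed.
    drop(guard); // release the first borrow, like `drop(mutex)` above...
    *cell.borrow_mut() += 1; // ...so a re-borrow in a callback is fine
    assert_eq!(*cell.borrow(), 1);
}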


@ -113,6 +113,11 @@ impl ThreadId {
self.0
}
/// Create a new thread id from a `u32` without checking if this thread exists.
pub fn new_unchecked(id: u32) -> Self {
Self(id)
}
pub const MAIN_THREAD: ThreadId = ThreadId(0);
}
@ -141,7 +146,7 @@ pub enum BlockReason {
/// Waiting for time to pass.
Sleep,
/// Blocked on a mutex.
Mutex(MutexId),
Mutex,
/// Blocked on a condition variable.
Condvar(CondvarId),
/// Blocked on a reader-writer lock.
@ -152,6 +157,8 @@ pub enum BlockReason {
InitOnce(InitOnceId),
/// Blocked on epoll.
Epoll,
/// Blocked on eventfd.
Eventfd,
}
/// The state of a thread.


@ -195,7 +195,7 @@ pub fn prune_stacktrace<'tcx>(
// This len check ensures that we don't somehow remove every frame, as doing so breaks
// the primary error message.
while stacktrace.len() > 1
&& stacktrace.last().map_or(false, |frame| !machine.is_local(frame))
&& stacktrace.last().is_some_and(|frame| !machine.is_local(frame))
{
stacktrace.pop();
}


@ -270,12 +270,8 @@ pub fn create_ecx<'tcx>(
) -> InterpResult<'tcx, InterpCx<'tcx, MiriMachine<'tcx>>> {
let typing_env = ty::TypingEnv::fully_monomorphized();
let layout_cx = LayoutCx::new(tcx, typing_env);
let mut ecx = InterpCx::new(
tcx,
rustc_span::DUMMY_SP,
typing_env,
MiriMachine::new(config, layout_cx)
);
let mut ecx =
InterpCx::new(tcx, rustc_span::DUMMY_SP, typing_env, MiriMachine::new(config, layout_cx));
// Some parts of initialization require a full `InterpCx`.
MiriMachine::late_init(&mut ecx, config, {


@ -116,8 +116,7 @@ pub fn resolve_path<'tcx>(
/// Gets the layout of a type at a path.
#[track_caller]
pub fn path_ty_layout<'tcx>(cx: &impl LayoutOf<'tcx>, path: &[&str]) -> TyAndLayout<'tcx> {
let ty = resolve_path(cx.tcx(), path, Namespace::TypeNS)
.ty(cx.tcx(), cx.typing_env());
let ty = resolve_path(cx.tcx(), path, Namespace::TypeNS).ty(cx.tcx(), cx.typing_env());
cx.layout_of(ty).to_result().ok().unwrap()
}
@ -1009,7 +1008,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_ref();
fn float_to_int_inner<'tcx, F: rustc_apfloat::Float>(
this: &MiriInterpCx<'tcx>,
ecx: &MiriInterpCx<'tcx>,
src: F,
cast_to: TyAndLayout<'tcx>,
round: rustc_apfloat::Round,
@ -1029,7 +1028,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Nothing else
_ =>
span_bug!(
this.cur_span(),
ecx.cur_span(),
"attempted float-to-int conversion with non-int output type {}",
cast_to.ty,
),


@ -218,20 +218,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
=> {
let [f] = check_arg_count(args)?;
let f = this.read_scalar(f)?.to_f32()?;
// Using host floats (but it's fine, these operations do not have guaranteed precision).
let f_host = f.to_host();
// Using host floats except for sqrt (but it's fine, these operations do not have
// guaranteed precision).
let res = match intrinsic_name {
"sinf32" => f_host.sin(),
"cosf32" => f_host.cos(),
"sqrtf32" => f_host.sqrt(), // FIXME Using host floats, this should use full-precision soft-floats
"expf32" => f_host.exp(),
"exp2f32" => f_host.exp2(),
"logf32" => f_host.ln(),
"log10f32" => f_host.log10(),
"log2f32" => f_host.log2(),
"sinf32" => f.to_host().sin().to_soft(),
"cosf32" => f.to_host().cos().to_soft(),
"sqrtf32" => math::sqrt(f),
"expf32" => f.to_host().exp().to_soft(),
"exp2f32" => f.to_host().exp2().to_soft(),
"logf32" => f.to_host().ln().to_soft(),
"log10f32" => f.to_host().log10().to_soft(),
"log2f32" => f.to_host().log2().to_soft(),
_ => bug!(),
};
let res = res.to_soft();
let res = this.adjust_nan(res, &[f]);
this.write_scalar(res, dest)?;
}
@ -247,20 +246,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
=> {
let [f] = check_arg_count(args)?;
let f = this.read_scalar(f)?.to_f64()?;
// Using host floats (but it's fine, these operations do not have guaranteed precision).
let f_host = f.to_host();
// Using host floats except for sqrt (but it's fine, these operations do not have
// guaranteed precision).
let res = match intrinsic_name {
"sinf64" => f_host.sin(),
"cosf64" => f_host.cos(),
"sqrtf64" => f_host.sqrt(), // FIXME Using host floats, this should use full-precision soft-floats
"expf64" => f_host.exp(),
"exp2f64" => f_host.exp2(),
"logf64" => f_host.ln(),
"log10f64" => f_host.log10(),
"log2f64" => f_host.log2(),
"sinf64" => f.to_host().sin().to_soft(),
"cosf64" => f.to_host().cos().to_soft(),
"sqrtf64" => math::sqrt(f),
"expf64" => f.to_host().exp().to_soft(),
"exp2f64" => f.to_host().exp2().to_soft(),
"logf64" => f.to_host().ln().to_soft(),
"log10f64" => f.to_host().log10().to_soft(),
"log2f64" => f.to_host().log2().to_soft(),
_ => bug!(),
};
let res = res.to_soft();
let res = this.adjust_nan(res, &[f]);
this.write_scalar(res, dest)?;
}


@ -104,42 +104,39 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let ty::Float(float_ty) = op.layout.ty.kind() else {
span_bug!(this.cur_span(), "{} operand is not a float", intrinsic_name)
};
// Using host floats (but it's fine, these operations do not have guaranteed precision).
// Using host floats except for sqrt (but it's fine, these operations do not
// have guaranteed precision).
match float_ty {
FloatTy::F16 => unimplemented!("f16_f128"),
FloatTy::F32 => {
let f = op.to_scalar().to_f32()?;
let f_host = f.to_host();
let res = match host_op {
"fsqrt" => f_host.sqrt(), // FIXME Using host floats, this should use full-precision soft-floats
"fsin" => f_host.sin(),
"fcos" => f_host.cos(),
"fexp" => f_host.exp(),
"fexp2" => f_host.exp2(),
"flog" => f_host.ln(),
"flog2" => f_host.log2(),
"flog10" => f_host.log10(),
"fsqrt" => math::sqrt(f),
"fsin" => f.to_host().sin().to_soft(),
"fcos" => f.to_host().cos().to_soft(),
"fexp" => f.to_host().exp().to_soft(),
"fexp2" => f.to_host().exp2().to_soft(),
"flog" => f.to_host().ln().to_soft(),
"flog2" => f.to_host().log2().to_soft(),
"flog10" => f.to_host().log10().to_soft(),
_ => bug!(),
};
let res = res.to_soft();
let res = this.adjust_nan(res, &[f]);
Scalar::from(res)
}
FloatTy::F64 => {
let f = op.to_scalar().to_f64()?;
let f_host = f.to_host();
let res = match host_op {
"fsqrt" => f_host.sqrt(),
"fsin" => f_host.sin(),
"fcos" => f_host.cos(),
"fexp" => f_host.exp(),
"fexp2" => f_host.exp2(),
"flog" => f_host.ln(),
"flog2" => f_host.log2(),
"flog10" => f_host.log10(),
"fsqrt" => math::sqrt(f),
"fsin" => f.to_host().sin().to_soft(),
"fcos" => f.to_host().cos().to_soft(),
"fexp" => f.to_host().exp().to_soft(),
"fexp2" => f.to_host().exp2().to_soft(),
"flog" => f.to_host().ln().to_soft(),
"flog2" => f.to_host().log2().to_soft(),
"flog10" => f.to_host().log10().to_soft(),
_ => bug!(),
};
let res = res.to_soft();
let res = this.adjust_nan(res, &[f]);
Scalar::from(res)
}


@ -83,6 +83,7 @@ mod eval;
mod helpers;
mod intrinsics;
mod machine;
mod math;
mod mono_hash_map;
mod operator;
mod provenance_gc;
@ -122,7 +123,7 @@ pub use crate::concurrency::data_race::{
};
pub use crate::concurrency::init_once::{EvalContextExt as _, InitOnceId};
pub use crate::concurrency::sync::{
CondvarId, EvalContextExt as _, MutexId, RwLockId, SynchronizationObjects,
CondvarId, EvalContextExt as _, MutexRef, RwLockId, SynchronizationObjects,
};
pub use crate::concurrency::thread::{
BlockReason, EvalContextExt as _, StackEmptyCallback, ThreadId, ThreadManager, TimeoutAnchor,


@ -732,20 +732,20 @@ impl<'tcx> MiriMachine<'tcx> {
}
pub(crate) fn late_init(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
config: &MiriConfig,
on_main_stack_empty: StackEmptyCallback<'tcx>,
) -> InterpResult<'tcx> {
EnvVars::init(this, config)?;
MiriMachine::init_extern_statics(this)?;
ThreadManager::init(this, on_main_stack_empty);
EnvVars::init(ecx, config)?;
MiriMachine::init_extern_statics(ecx)?;
ThreadManager::init(ecx, on_main_stack_empty);
interp_ok(())
}
pub(crate) fn add_extern_static(this: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
pub(crate) fn add_extern_static(ecx: &mut MiriInterpCx<'tcx>, name: &str, ptr: Pointer) {
// This got just allocated, so there definitely is a pointer here.
let ptr = ptr.into_pointer_or_addr().unwrap();
this.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
ecx.machine.extern_statics.try_insert(Symbol::intern(name), ptr).unwrap();
}
pub(crate) fn communicate(&self) -> bool {

src/tools/miri/src/math.rs (new file, 164 lines)

@ -0,0 +1,164 @@
use rand::Rng as _;
use rand::distributions::Distribution as _;
use rustc_apfloat::Float as _;
use rustc_apfloat::ieee::IeeeFloat;
/// Disturbs a floating-point result by a relative error on the order of (-2^err_scale, 2^err_scale).
pub(crate) fn apply_random_float_error<F: rustc_apfloat::Float>(
ecx: &mut crate::MiriInterpCx<'_>,
val: F,
err_scale: i32,
) -> F {
let rng = ecx.machine.rng.get_mut();
// Generate a random integer in the range [0, 2^PREC).
let dist = rand::distributions::Uniform::new(0, 1 << F::PRECISION);
let err = F::from_u128(dist.sample(rng))
.value
.scalbn(err_scale.strict_sub(F::PRECISION.try_into().unwrap()));
// give it a random sign
let err = if rng.gen::<bool>() { -err } else { err };
// multiply the value by (1 + err)
(val * (F::from_u128(1).value + err).value).value
}
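// Worked example (editorial, not part of the commit): for F = f32
// (PRECISION = 24) and err_scale = -10, `dist` samples n in [0, 2^24),
// so err = n * 2^(-10 - 24) lies in [0, 2^-10); after the random sign,
// the relative error is in (-2^-10, 2^-10), matching the doc comment.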
pub(crate) fn sqrt<S: rustc_apfloat::ieee::Semantics>(x: IeeeFloat<S>) -> IeeeFloat<S> {
match x.category() {
// preserve zero sign
rustc_apfloat::Category::Zero => x,
// propagate NaN
rustc_apfloat::Category::NaN => x,
// sqrt of negative number is NaN
_ if x.is_negative() => IeeeFloat::NAN,
// sqrt(∞) = ∞
rustc_apfloat::Category::Infinity => IeeeFloat::INFINITY,
rustc_apfloat::Category::Normal => {
// Floating point precision, excluding the integer bit
let prec = i32::try_from(S::PRECISION).unwrap() - 1;
// x = 2^(exp - prec) * mant
// where mant is an integer with prec+1 bits
// mant is a u128, which should be large enough for the largest prec (112 for f128)
let mut exp = x.ilogb();
let mut mant = x.scalbn(prec - exp).to_u128(128).value;
if exp % 2 != 0 {
// Make exponent even, so it can be divided by 2
exp -= 1;
mant <<= 1;
}
// Bit-by-bit (base-2 digit-by-digit) sqrt of mant.
// mant is treated here as a fixed point number with prec fractional bits.
// mant will be shifted left by one bit to have an extra fractional bit, which
// will be used to determine the rounding direction.
// res is the truncated sqrt of mant, where one bit is added at each iteration.
let mut res = 0u128;
// rem is the remainder with the current res
// rem_i = 2^i * ((mant<<1) - res_i^2)
// starting with res = 0, rem = mant<<1
let mut rem = mant << 1;
// s_i = 2*res_i
let mut s = 0u128;
// d is used to iterate over bits, from high to low (d_i = 2^(-i))
let mut d = 1u128 << (prec + 1);
// For iteration j=i+1, we need to find largest b_j = 0 or 1 such that
// (res_i + b_j * 2^(-j))^2 <= mant<<1
// Expanding (a + b)^2 = a^2 + b^2 + 2*a*b:
// res_i^2 + (b_j * 2^(-j))^2 + 2 * res_i * b_j * 2^(-j) <= mant<<1
// And rearranging the terms:
// b_j^2 * 2^(-j) + 2 * res_i * b_j <= 2^j * (mant<<1 - res_i^2)
// b_j^2 * 2^(-j) + 2 * res_i * b_j <= rem_i
while d != 0 {
// Probe b_j^2 * 2^(-j) + 2 * res_i * b_j <= rem_i with b_j = 1:
// t = 2*res_i + 2^(-j)
let t = s + d;
if rem >= t {
// b_j should be 1, so make res_j = res_i + 2^(-j) and adjust rem
res += d;
s += d + d;
rem -= t;
}
// Adjust rem for next iteration
rem <<= 1;
// Shift iterator
d >>= 1;
}
// Remove extra fractional bit from result, rounding to nearest.
// If the last bit is 0, then the nearest neighbor is definitely the lower one.
// If the last bit is 1, it sounds like this may either be a tie (if there's
// infinitely many 0s after this 1), or the nearest neighbor is the upper one.
// However, since square roots are either exact or irrational, and an exact root
// would lead to the last "extra" bit being 0, we can exclude a tie in this case.
// We therefore always round up if the last bit is 1. When the last bit is 0,
// adding 1 will not do anything since the shift will discard it.
res = (res + 1) >> 1;
// Build resulting value with res as mantissa and exp/2 as exponent
IeeeFloat::from_u128(res).value.scalbn(exp / 2 - prec)
}
}
}
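// Worked example (editorial, not part of the commit): sqrt(2.25_f32).
// prec = 23 and ilogb(2.25) = 1, so mant = 2.25 * 2^22; the exponent is
// odd, so exp becomes 0 and mant doubles to 2.25 * 2^23. The loop then
// produces res = 1.5 * 2^24 (including the extra fractional bit), the
// rounding shift yields 1.5 * 2^23, and scalbn(0/2 - 23) returns exactly 1.5.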
#[cfg(test)]
mod tests {
use rustc_apfloat::ieee::{DoubleS, HalfS, IeeeFloat, QuadS, SingleS};
use super::sqrt;
#[test]
fn test_sqrt() {
#[track_caller]
fn test<S: rustc_apfloat::ieee::Semantics>(x: &str, expected: &str) {
let x: IeeeFloat<S> = x.parse().unwrap();
let expected: IeeeFloat<S> = expected.parse().unwrap();
let result = sqrt(x);
assert_eq!(result, expected);
}
fn exact_tests<S: rustc_apfloat::ieee::Semantics>() {
test::<S>("0", "0");
test::<S>("1", "1");
test::<S>("1.5625", "1.25");
test::<S>("2.25", "1.5");
test::<S>("4", "2");
test::<S>("5.0625", "2.25");
test::<S>("9", "3");
test::<S>("16", "4");
test::<S>("25", "5");
test::<S>("36", "6");
test::<S>("49", "7");
test::<S>("64", "8");
test::<S>("81", "9");
test::<S>("100", "10");
test::<S>("0.5625", "0.75");
test::<S>("0.25", "0.5");
test::<S>("0.0625", "0.25");
test::<S>("0.00390625", "0.0625");
}
exact_tests::<HalfS>();
exact_tests::<SingleS>();
exact_tests::<DoubleS>();
exact_tests::<QuadS>();
test::<SingleS>("2", "1.4142135");
test::<DoubleS>("2", "1.4142135623730951");
test::<SingleS>("1.1", "1.0488088");
test::<DoubleS>("1.1", "1.0488088481701516");
test::<SingleS>("2.2", "1.4832398");
test::<DoubleS>("2.2", "1.4832396974191326");
test::<SingleS>("1.22101e-40", "1.10499205e-20");
test::<DoubleS>("1.22101e-310", "1.1049932126488395e-155");
test::<SingleS>("3.4028235e38", "1.8446743e19");
test::<DoubleS>("1.7976931348623157e308", "1.3407807929942596e154");
}
}


@ -195,10 +195,10 @@ impl LiveAllocs<'_, '_> {
}
}
fn remove_unreachable_tags<'tcx>(this: &mut MiriInterpCx<'tcx>, tags: FxHashSet<BorTag>) {
fn remove_unreachable_tags<'tcx>(ecx: &mut MiriInterpCx<'tcx>, tags: FxHashSet<BorTag>) {
// Avoid iterating all allocations if there's no borrow tracker anyway.
if this.machine.borrow_tracker.is_some() {
this.memory.alloc_map().iter(|it| {
if ecx.machine.borrow_tracker.is_some() {
ecx.memory.alloc_map().iter(|it| {
for (_id, (_kind, alloc)) in it {
alloc.extra.borrow_tracker.as_ref().unwrap().remove_unreachable_tags(&tags);
}
@ -206,16 +206,16 @@ fn remove_unreachable_tags<'tcx>(this: &mut MiriInterpCx<'tcx>, tags: FxHashSet<
}
}
fn remove_unreachable_allocs<'tcx>(this: &mut MiriInterpCx<'tcx>, allocs: FxHashSet<AllocId>) {
let allocs = LiveAllocs { ecx: this, collected: allocs };
this.machine.allocation_spans.borrow_mut().retain(|id, _| allocs.is_live(*id));
this.machine.symbolic_alignment.borrow_mut().retain(|id, _| allocs.is_live(*id));
this.machine.alloc_addresses.borrow_mut().remove_unreachable_allocs(&allocs);
if let Some(borrow_tracker) = &this.machine.borrow_tracker {
fn remove_unreachable_allocs<'tcx>(ecx: &mut MiriInterpCx<'tcx>, allocs: FxHashSet<AllocId>) {
let allocs = LiveAllocs { ecx, collected: allocs };
ecx.machine.allocation_spans.borrow_mut().retain(|id, _| allocs.is_live(*id));
ecx.machine.symbolic_alignment.borrow_mut().retain(|id, _| allocs.is_live(*id));
ecx.machine.alloc_addresses.borrow_mut().remove_unreachable_allocs(&allocs);
if let Some(borrow_tracker) = &ecx.machine.borrow_tracker {
borrow_tracker.borrow_mut().remove_unreachable_allocs(&allocs);
}
// Clean up core (non-Miri-specific) state.
this.remove_unreachable_allocs(&allocs.collected);
ecx.remove_unreachable_allocs(&allocs.collected);
}
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}


@ -4,13 +4,13 @@ use crate::*;
impl<'tcx> MiriMachine<'tcx> {
fn alloc_extern_static(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
name: &str,
val: ImmTy<'tcx>,
) -> InterpResult<'tcx> {
let place = this.allocate(val.layout, MiriMemoryKind::ExternStatic.into())?;
this.write_immediate(*val, &place)?;
Self::add_extern_static(this, name, place.ptr());
let place = ecx.allocate(val.layout, MiriMemoryKind::ExternStatic.into())?;
ecx.write_immediate(*val, &place)?;
Self::add_extern_static(ecx, name, place.ptr());
interp_ok(())
}
@ -18,72 +18,69 @@ impl<'tcx> MiriMachine<'tcx> {
/// Most of them are for weak symbols, which we all set to null (indicating that the
/// symbol is not supported, and triggering fallback code which ends up calling
/// some other shim that we do support).
fn null_ptr_extern_statics(
this: &mut MiriInterpCx<'tcx>,
names: &[&str],
) -> InterpResult<'tcx> {
fn null_ptr_extern_statics(ecx: &mut MiriInterpCx<'tcx>, names: &[&str]) -> InterpResult<'tcx> {
for name in names {
let val = ImmTy::from_int(0, this.machine.layouts.usize);
Self::alloc_extern_static(this, name, val)?;
let val = ImmTy::from_int(0, ecx.machine.layouts.usize);
Self::alloc_extern_static(ecx, name, val)?;
}
interp_ok(())
}
/// Extern statics that are initialized with function pointers to the symbols of the same name.
fn weak_symbol_extern_statics(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
names: &[&str],
) -> InterpResult<'tcx> {
for name in names {
assert!(this.is_dyn_sym(name), "{name} is not a dynamic symbol");
let layout = this.machine.layouts.const_raw_ptr;
let ptr = this.fn_ptr(FnVal::Other(DynSym::from_str(name)));
let val = ImmTy::from_scalar(Scalar::from_pointer(ptr, this), layout);
Self::alloc_extern_static(this, name, val)?;
assert!(ecx.is_dyn_sym(name), "{name} is not a dynamic symbol");
let layout = ecx.machine.layouts.const_raw_ptr;
let ptr = ecx.fn_ptr(FnVal::Other(DynSym::from_str(name)));
let val = ImmTy::from_scalar(Scalar::from_pointer(ptr, ecx), layout);
Self::alloc_extern_static(ecx, name, val)?;
}
interp_ok(())
}
/// Sets up the "extern statics" for this machine.
pub fn init_extern_statics(this: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx> {
pub fn init_extern_statics(ecx: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx> {
// "__rust_no_alloc_shim_is_unstable"
let val = ImmTy::from_int(0, this.machine.layouts.u8); // always 0, value does not matter
Self::alloc_extern_static(this, "__rust_no_alloc_shim_is_unstable", val)?;
let val = ImmTy::from_int(0, ecx.machine.layouts.u8); // always 0, value does not matter
Self::alloc_extern_static(ecx, "__rust_no_alloc_shim_is_unstable", val)?;
// "__rust_alloc_error_handler_should_panic"
let val = this.tcx.sess.opts.unstable_opts.oom.should_panic();
let val = ImmTy::from_int(val, this.machine.layouts.u8);
Self::alloc_extern_static(this, "__rust_alloc_error_handler_should_panic", val)?;
let val = ecx.tcx.sess.opts.unstable_opts.oom.should_panic();
let val = ImmTy::from_int(val, ecx.machine.layouts.u8);
Self::alloc_extern_static(ecx, "__rust_alloc_error_handler_should_panic", val)?;
if this.target_os_is_unix() {
if ecx.target_os_is_unix() {
// "environ" is mandated by POSIX.
let environ = this.machine.env_vars.unix().environ();
Self::add_extern_static(this, "environ", environ);
let environ = ecx.machine.env_vars.unix().environ();
Self::add_extern_static(ecx, "environ", environ);
}
match this.tcx.sess.target.os.as_ref() {
match ecx.tcx.sess.target.os.as_ref() {
"linux" => {
Self::null_ptr_extern_statics(this, &[
Self::null_ptr_extern_statics(ecx, &[
"__cxa_thread_atexit_impl",
"__clock_gettime64",
])?;
Self::weak_symbol_extern_statics(this, &["getrandom", "statx"])?;
Self::weak_symbol_extern_statics(ecx, &["getrandom", "statx"])?;
}
"freebsd" => {
Self::null_ptr_extern_statics(this, &["__cxa_thread_atexit_impl"])?;
Self::null_ptr_extern_statics(ecx, &["__cxa_thread_atexit_impl"])?;
}
"android" => {
Self::null_ptr_extern_statics(this, &["bsd_signal"])?;
Self::weak_symbol_extern_statics(this, &["signal", "getrandom"])?;
Self::null_ptr_extern_statics(ecx, &["bsd_signal"])?;
Self::weak_symbol_extern_statics(ecx, &["signal", "getrandom"])?;
}
"windows" => {
// "_tls_used"
// This is some obscure hack that is part of the Windows TLS story. It's a `u8`.
let val = ImmTy::from_int(0, this.machine.layouts.u8);
Self::alloc_extern_static(this, "_tls_used", val)?;
let val = ImmTy::from_int(0, ecx.machine.layouts.u8);
Self::alloc_extern_static(ecx, "_tls_used", val)?;
}
"illumos" | "solaris" => {
Self::weak_symbol_extern_statics(this, &["pthread_setname_np"])?;
Self::weak_symbol_extern_statics(ecx, &["pthread_setname_np"])?;
}
_ => {} // No "extern statics" supported on this target
}


@ -496,14 +496,14 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Rust allocation
"__rust_alloc" | "miri_alloc" => {
let default = |this: &mut MiriInterpCx<'tcx>| {
let default = |ecx: &mut MiriInterpCx<'tcx>| {
// Only call `check_shim` when `#[global_allocator]` isn't used. When that
// macro is used, we act like no shim exists, so that the exported function can run.
let [size, align] = this.check_shim(abi, ExternAbi::Rust, link_name, args)?;
let size = this.read_target_usize(size)?;
let align = this.read_target_usize(align)?;
let [size, align] = ecx.check_shim(abi, ExternAbi::Rust, link_name, args)?;
let size = ecx.read_target_usize(size)?;
let align = ecx.read_target_usize(align)?;
this.check_rustc_alloc_request(size, align)?;
ecx.check_rustc_alloc_request(size, align)?;
let memory_kind = match link_name.as_str() {
"__rust_alloc" => MiriMemoryKind::Rust,
@ -511,13 +511,13 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
_ => unreachable!(),
};
let ptr = this.allocate_ptr(
let ptr = ecx.allocate_ptr(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
memory_kind.into(),
)?;
this.write_pointer(ptr, dest)
ecx.write_pointer(ptr, dest)
};
match link_name.as_str() {
@ -555,14 +555,14 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
});
}
"__rust_dealloc" | "miri_dealloc" => {
let default = |this: &mut MiriInterpCx<'tcx>| {
let default = |ecx: &mut MiriInterpCx<'tcx>| {
// See the comment for `__rust_alloc` why `check_shim` is only called in the
// default case.
let [ptr, old_size, align] =
this.check_shim(abi, ExternAbi::Rust, link_name, args)?;
let ptr = this.read_pointer(ptr)?;
let old_size = this.read_target_usize(old_size)?;
let align = this.read_target_usize(align)?;
ecx.check_shim(abi, ExternAbi::Rust, link_name, args)?;
let ptr = ecx.read_pointer(ptr)?;
let old_size = ecx.read_target_usize(old_size)?;
let align = ecx.read_target_usize(align)?;
let memory_kind = match link_name.as_str() {
"__rust_dealloc" => MiriMemoryKind::Rust,
@ -571,7 +571,7 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
};
// No need to check old_size/align; we anyway check that they match the allocation.
this.deallocate_ptr(
ecx.deallocate_ptr(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
memory_kind.into(),


@ -8,7 +8,7 @@ use crate::*;
const TASK_COMM_LEN: usize = 16;
pub fn prctl<'tcx>(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
link_name: Symbol,
abi: ExternAbi,
args: &[OpTy<'tcx>],
@ -16,41 +16,41 @@ pub fn prctl<'tcx>(
) -> InterpResult<'tcx> {
// We do not use `check_shim` here because `prctl` is variadic. The argument
// count is checked below.
this.check_abi_and_shim_symbol_clash(abi, ExternAbi::C { unwind: false }, link_name)?;
ecx.check_abi_and_shim_symbol_clash(abi, ExternAbi::C { unwind: false }, link_name)?;
// FIXME: Use constants once https://github.com/rust-lang/libc/pull/3941 backported to the 0.2 branch.
let pr_set_name = 15;
let pr_get_name = 16;
let [op] = check_min_arg_count("prctl", args)?;
let res = match this.read_scalar(op)?.to_i32()? {
let res = match ecx.read_scalar(op)?.to_i32()? {
op if op == pr_set_name => {
let [_, name] = check_min_arg_count("prctl(PR_SET_NAME, ...)", args)?;
let name = this.read_scalar(name)?;
let thread = this.pthread_self()?;
let name = ecx.read_scalar(name)?;
let thread = ecx.pthread_self()?;
// The Linux kernel silently truncates long names.
// https://www.man7.org/linux/man-pages/man2/PR_SET_NAME.2const.html
let res =
this.pthread_setname_np(thread, name, TASK_COMM_LEN, /* truncate */ true)?;
ecx.pthread_setname_np(thread, name, TASK_COMM_LEN, /* truncate */ true)?;
assert_eq!(res, ThreadNameResult::Ok);
Scalar::from_u32(0)
}
op if op == pr_get_name => {
let [_, name] = check_min_arg_count("prctl(PR_GET_NAME, ...)", args)?;
let name = this.read_scalar(name)?;
let thread = this.pthread_self()?;
let len = Scalar::from_target_usize(TASK_COMM_LEN as u64, this);
this.check_ptr_access(
name.to_pointer(this)?,
let name = ecx.read_scalar(name)?;
let thread = ecx.pthread_self()?;
let len = Scalar::from_target_usize(TASK_COMM_LEN as u64, ecx);
ecx.check_ptr_access(
name.to_pointer(ecx)?,
Size::from_bytes(TASK_COMM_LEN),
CheckInAllocMsg::MemoryAccessTest,
)?;
let res = this.pthread_getname_np(thread, name, len, /* truncate*/ false)?;
let res = ecx.pthread_getname_np(thread, name, len, /* truncate*/ false)?;
assert_eq!(res, ThreadNameResult::Ok);
Scalar::from_u32(0)
}
op => throw_unsup_format!("Miri does not support `prctl` syscall with op={}", op),
};
this.write_scalar(res, dest)?;
ecx.write_scalar(res, dest)?;
interp_ok(())
}


@ -4,7 +4,7 @@ use std::io;
use std::io::ErrorKind;
use crate::concurrency::VClock;
use crate::shims::unix::fd::FileDescriptionRef;
use crate::shims::unix::fd::{FileDescriptionRef, WeakFileDescriptionRef};
use crate::shims::unix::linux::epoll::{EpollReadyEvents, EvalContextExt as _};
use crate::shims::unix::*;
use crate::*;
@ -26,6 +26,10 @@ struct Event {
counter: Cell<u64>,
is_nonblock: bool,
clock: RefCell<VClock>,
/// A list of thread ids blocked on eventfd::read.
blocked_read_tid: RefCell<Vec<ThreadId>>,
/// A list of thread ids blocked on eventfd::write.
blocked_write_tid: RefCell<Vec<ThreadId>>,
}
impl FileDescription for Event {
@ -72,31 +76,8 @@ impl FileDescription for Event {
// eventfd read at the size of u64.
let buf_place = ecx.ptr_to_mplace_unaligned(ptr, ty);
// Block when counter == 0.
let counter = self.counter.get();
if counter == 0 {
if self.is_nonblock {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
}
throw_unsup_format!("eventfd: blocking is unsupported");
} else {
// Synchronize with all prior `write` calls to this FD.
ecx.acquire_clock(&self.clock.borrow());
// Give old counter value to userspace, and set counter value to 0.
ecx.write_int(counter, &buf_place)?;
self.counter.set(0);
// When any event happens, we check and update the status of all supported event
// types for the current file description.
ecx.check_and_update_readiness(self_ref)?;
// Tell userspace how many bytes we wrote.
ecx.write_int(buf_place.layout.size.bytes(), dest)?;
}
interp_ok(())
let weak_eventfd = self_ref.downgrade();
eventfd_read(buf_place, dest, weak_eventfd, ecx)
}
/// A write call adds the 8-byte integer value supplied in
@ -127,7 +108,7 @@ impl FileDescription for Event {
return ecx.set_last_error_and_return(ErrorKind::InvalidInput, dest);
}
// Read the user supplied value from the pointer.
// Read the user-supplied value from the pointer.
let buf_place = ecx.ptr_to_mplace_unaligned(ptr, ty);
let num = ecx.read_scalar(&buf_place)?.to_u64()?;
@ -137,27 +118,8 @@ impl FileDescription for Event {
}
// If the addition does not make the counter exceed the maximum value, update the counter.
// Else, block.
match self.counter.get().checked_add(num) {
Some(new_count @ 0..=MAX_COUNTER) => {
// Future `read` calls will synchronize with this write, so update the FD clock.
ecx.release_clock(|clock| {
self.clock.borrow_mut().join(clock);
});
self.counter.set(new_count);
}
None | Some(u64::MAX) =>
if self.is_nonblock {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
} else {
throw_unsup_format!("eventfd: blocking is unsupported");
},
};
// When any event happens, we check and update the status of all supported event
// types for the current file description.
ecx.check_and_update_readiness(self_ref)?;
// Return how many bytes we read.
ecx.write_int(buf_place.layout.size.bytes(), dest)
let weak_eventfd = self_ref.downgrade();
eventfd_write(num, buf_place, dest, weak_eventfd, ecx)
}
}
@ -217,8 +179,151 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
counter: Cell::new(val.into()),
is_nonblock,
clock: RefCell::new(VClock::default()),
blocked_read_tid: RefCell::new(Vec::new()),
blocked_write_tid: RefCell::new(Vec::new()),
});
interp_ok(Scalar::from_i32(fd_value))
}
}
/// Blocks the thread if adding the value would make the counter exceed `u64::MAX - 1`;
/// otherwise just adds the user-supplied value to the current counter.
fn eventfd_write<'tcx>(
num: u64,
buf_place: MPlaceTy<'tcx>,
dest: &MPlaceTy<'tcx>,
weak_eventfd: WeakFileDescriptionRef,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let Some(eventfd_ref) = weak_eventfd.upgrade() else {
throw_unsup_format!("eventfd FD got closed while blocking.")
};
// Since we pass the weak file description ref, it is guaranteed to be
// an eventfd file description.
let eventfd = eventfd_ref.downcast::<Event>().unwrap();
match eventfd.counter.get().checked_add(num) {
Some(new_count @ 0..=MAX_COUNTER) => {
// Future `read` calls will synchronize with this write, so update the FD clock.
ecx.release_clock(|clock| {
eventfd.clock.borrow_mut().join(clock);
});
// When this function is called, the addition is guaranteed not to exceed u64::MAX - 1.
eventfd.counter.set(new_count);
// When any event happens, we check and update the status of all supported event
// types for the current file description.
ecx.check_and_update_readiness(&eventfd_ref)?;
// Unblock *all* threads previously blocked on `read`.
// We need to take out the blocked thread ids and unblock them together,
// because `unblock_thread` may block them again and end up re-adding the
// thread to the blocked list.
let waiting_threads = std::mem::take(&mut *eventfd.blocked_read_tid.borrow_mut());
// FIXME: We can randomize the order of unblocking.
for thread_id in waiting_threads {
ecx.unblock_thread(thread_id, BlockReason::Eventfd)?;
}
// Return how many bytes we wrote.
return ecx.write_int(buf_place.layout.size.bytes(), dest);
}
None | Some(u64::MAX) => {
if eventfd.is_nonblock {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
}
let dest = dest.clone();
eventfd.blocked_write_tid.borrow_mut().push(ecx.active_thread());
ecx.block_thread(
BlockReason::Eventfd,
None,
callback!(
@capture<'tcx> {
num: u64,
buf_place: MPlaceTy<'tcx>,
dest: MPlaceTy<'tcx>,
weak_eventfd: WeakFileDescriptionRef,
}
@unblock = |this| {
eventfd_write(num, buf_place, &dest, weak_eventfd, this)
}
),
);
}
};
interp_ok(())
}
/// Blocks the thread if the current counter is 0;
/// otherwise returns the current counter value to the caller and sets the counter to 0.
fn eventfd_read<'tcx>(
buf_place: MPlaceTy<'tcx>,
dest: &MPlaceTy<'tcx>,
weak_eventfd: WeakFileDescriptionRef,
ecx: &mut MiriInterpCx<'tcx>,
) -> InterpResult<'tcx> {
let Some(eventfd_ref) = weak_eventfd.upgrade() else {
throw_unsup_format!("eventfd FD got closed while blocking.")
};
// Since we pass the weak file description ref to the callback function, it is guaranteed to be
// an eventfd file description.
let eventfd = eventfd_ref.downcast::<Event>().unwrap();
// Block when counter == 0.
let counter = eventfd.counter.replace(0);
if counter == 0 {
if eventfd.is_nonblock {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
}
let dest = dest.clone();
eventfd.blocked_read_tid.borrow_mut().push(ecx.active_thread());
ecx.block_thread(
BlockReason::Eventfd,
None,
callback!(
@capture<'tcx> {
buf_place: MPlaceTy<'tcx>,
dest: MPlaceTy<'tcx>,
weak_eventfd: WeakFileDescriptionRef,
}
@unblock = |this| {
eventfd_read(buf_place, &dest, weak_eventfd, this)
}
),
);
} else {
// Synchronize with all prior `write` calls to this FD.
ecx.acquire_clock(&eventfd.clock.borrow());
// Give old counter value to userspace, and set counter value to 0.
ecx.write_int(counter, &buf_place)?;
// When any event happens, we check and update the status of all supported event
// types for the current file description.
ecx.check_and_update_readiness(&eventfd_ref)?;
// Unblock *all* threads previously blocked on `write`.
// We need to take out the blocked thread ids and unblock them together,
// because `unblock_thread` may block them again and end up re-adding the
// thread to the blocked list.
let waiting_threads = std::mem::take(&mut *eventfd.blocked_write_tid.borrow_mut());
// FIXME: We can randomize the order of unblocking.
for thread_id in waiting_threads {
ecx.unblock_thread(thread_id, BlockReason::Eventfd)?;
}
// Tell userspace how many bytes we read.
return ecx.write_int(buf_place.layout.size.bytes(), dest);
}
interp_ok(())
}
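
The semantics being modeled here match the kernel's eventfd: `write` adds the supplied value to a 64-bit counter and blocks once the sum would exceed `u64::MAX - 1`, while `read` hands the whole counter to userspace and resets it to zero. A minimal guest-side illustration (hypothetical sketch, Linux + `libc` assumed):

fn main() {
    let fd = unsafe { libc::eventfd(0, 0) };
    assert_ne!(fd, -1);
    // Two writes accumulate into the counter...
    for _ in 0..2 {
        let val = 3_u64.to_ne_bytes();
        assert_eq!(unsafe { libc::write(fd, val.as_ptr().cast(), 8) }, 8);
    }
    // ...and a single read drains the whole counter back to zero.
    let mut buf = [0_u8; 8];
    assert_eq!(unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8) }, 8);
    assert_eq!(u64::from_ne_bytes(buf), 6);
}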

View File

@ -9,7 +9,7 @@ struct LinuxFutex {
/// Implementation of the SYS_futex syscall.
/// `args` is the arguments *including* the syscall number.
pub fn futex<'tcx>(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
args: &[OpTy<'tcx>],
dest: &MPlaceTy<'tcx>,
) -> InterpResult<'tcx> {
@ -26,19 +26,19 @@ pub fn futex<'tcx>(
// The first three arguments (after the syscall number itself) are the same to all futex operations:
// (int *addr, int op, int val).
// We checked above that these definitely exist.
let addr = this.read_pointer(addr)?;
let op = this.read_scalar(op)?.to_i32()?;
let val = this.read_scalar(val)?.to_i32()?;
let addr = ecx.read_pointer(addr)?;
let op = ecx.read_scalar(op)?.to_i32()?;
let val = ecx.read_scalar(val)?.to_i32()?;
// This is a vararg function so we have to bring our own type for this pointer.
let addr = this.ptr_to_mplace(addr, this.machine.layouts.i32);
let addr = ecx.ptr_to_mplace(addr, ecx.machine.layouts.i32);
let futex_private = this.eval_libc_i32("FUTEX_PRIVATE_FLAG");
let futex_wait = this.eval_libc_i32("FUTEX_WAIT");
let futex_wait_bitset = this.eval_libc_i32("FUTEX_WAIT_BITSET");
let futex_wake = this.eval_libc_i32("FUTEX_WAKE");
let futex_wake_bitset = this.eval_libc_i32("FUTEX_WAKE_BITSET");
let futex_realtime = this.eval_libc_i32("FUTEX_CLOCK_REALTIME");
let futex_private = ecx.eval_libc_i32("FUTEX_PRIVATE_FLAG");
let futex_wait = ecx.eval_libc_i32("FUTEX_WAIT");
let futex_wait_bitset = ecx.eval_libc_i32("FUTEX_WAIT_BITSET");
let futex_wake = ecx.eval_libc_i32("FUTEX_WAKE");
let futex_wake_bitset = ecx.eval_libc_i32("FUTEX_WAKE_BITSET");
let futex_realtime = ecx.eval_libc_i32("FUTEX_CLOCK_REALTIME");
// FUTEX_PRIVATE enables an optimization that stops it from working across processes.
// Miri doesn't support that anyway, so we ignore that flag.
@ -57,9 +57,9 @@ pub fn futex<'tcx>(
let (timeout, bitset) = if wait_bitset {
let [_, _, _, _, timeout, uaddr2, bitset] =
check_min_arg_count("`syscall(SYS_futex, FUTEX_WAIT_BITSET, ...)`", args)?;
let _timeout = this.read_pointer(timeout)?;
let _uaddr2 = this.read_pointer(uaddr2)?;
(timeout, this.read_scalar(bitset)?.to_u32()?)
let _timeout = ecx.read_pointer(timeout)?;
let _uaddr2 = ecx.read_pointer(uaddr2)?;
(timeout, ecx.read_scalar(bitset)?.to_u32()?)
} else {
let [_, _, _, _, timeout] =
check_min_arg_count("`syscall(SYS_futex, FUTEX_WAIT, ...)`", args)?;
@ -67,21 +67,21 @@ pub fn futex<'tcx>(
};
if bitset == 0 {
return this.set_last_error_and_return(LibcError("EINVAL"), dest);
return ecx.set_last_error_and_return(LibcError("EINVAL"), dest);
}
let timeout = this.deref_pointer_as(timeout, this.libc_ty_layout("timespec"))?;
let timeout = if this.ptr_is_null(timeout.ptr())? {
let timeout = ecx.deref_pointer_as(timeout, ecx.libc_ty_layout("timespec"))?;
let timeout = if ecx.ptr_is_null(timeout.ptr())? {
None
} else {
let duration = match this.read_timespec(&timeout)? {
let duration = match ecx.read_timespec(&timeout)? {
Some(duration) => duration,
None => {
return this.set_last_error_and_return(LibcError("EINVAL"), dest);
return ecx.set_last_error_and_return(LibcError("EINVAL"), dest);
}
};
let timeout_clock = if op & futex_realtime == futex_realtime {
this.check_no_isolation(
ecx.check_no_isolation(
"`futex` syscall with `op=FUTEX_WAIT` and non-null timeout with `FUTEX_CLOCK_REALTIME`",
)?;
TimeoutClock::RealTime
@ -139,36 +139,36 @@ pub fn futex<'tcx>(
//
// Thankfully, preemptions cannot happen inside a Miri shim, so we do not need to
// do anything special to guarantee fence-load-comparison atomicity.
this.atomic_fence(AtomicFenceOrd::SeqCst)?;
ecx.atomic_fence(AtomicFenceOrd::SeqCst)?;
// Read an `i32` through the pointer, regardless of any wrapper types.
// It's not uncommon for `addr` to be passed as another type than `*mut i32`, such as `*const AtomicI32`.
// We do an acquire read -- it only seems reasonable that if we observe a value here, we
// actually establish an ordering with that value.
let futex_val = this.read_scalar_atomic(&addr, AtomicReadOrd::Acquire)?.to_i32()?;
let futex_val = ecx.read_scalar_atomic(&addr, AtomicReadOrd::Acquire)?.to_i32()?;
if val == futex_val {
// The value still matches, so we block the thread and make it wait for FUTEX_WAKE.
// This cannot fail since we already did an atomic acquire read on that pointer.
// Acquire reads are only allowed on mutable memory.
let futex_ref = this
let futex_ref = ecx
.get_sync_or_init(addr.ptr(), |_| LinuxFutex { futex: Default::default() })
.unwrap()
.futex
.clone();
this.futex_wait(
ecx.futex_wait(
futex_ref,
bitset,
timeout,
Scalar::from_target_isize(0, this), // retval_succ
Scalar::from_target_isize(-1, this), // retval_timeout
Scalar::from_target_isize(0, ecx), // retval_succ
Scalar::from_target_isize(-1, ecx), // retval_timeout
dest.clone(),
LibcError("ETIMEDOUT"), // errno_timeout
);
} else {
// The futex value doesn't match the expected value, so we return failure
// right away without sleeping: -1 and errno set to EAGAIN.
return this.set_last_error_and_return(LibcError("EAGAIN"), dest);
return ecx.set_last_error_and_return(LibcError("EAGAIN"), dest);
}
}
// FUTEX_WAKE: (int *addr, int op = FUTEX_WAKE, int val)
@ -179,42 +179,42 @@ pub fn futex<'tcx>(
// Same as FUTEX_WAKE, but allows you to specify a bitset to select which threads to wake up.
op if op == futex_wake || op == futex_wake_bitset => {
let Some(futex_ref) =
this.get_sync_or_init(addr.ptr(), |_| LinuxFutex { futex: Default::default() })
ecx.get_sync_or_init(addr.ptr(), |_| LinuxFutex { futex: Default::default() })
else {
// No AllocId, or no live allocation at that AllocId.
// Return an error code. (That seems nicer than silently doing something non-intuitive.)
// This means that if an address gets reused by a new allocation,
// we'll use an independent futex queue for this... that seems acceptable.
return this.set_last_error_and_return(LibcError("EFAULT"), dest);
return ecx.set_last_error_and_return(LibcError("EFAULT"), dest);
};
let futex_ref = futex_ref.futex.clone();
let bitset = if op == futex_wake_bitset {
let [_, _, _, _, timeout, uaddr2, bitset] =
check_min_arg_count("`syscall(SYS_futex, FUTEX_WAKE_BITSET, ...)`", args)?;
let _timeout = this.read_pointer(timeout)?;
let _uaddr2 = this.read_pointer(uaddr2)?;
this.read_scalar(bitset)?.to_u32()?
let _timeout = ecx.read_pointer(timeout)?;
let _uaddr2 = ecx.read_pointer(uaddr2)?;
ecx.read_scalar(bitset)?.to_u32()?
} else {
u32::MAX
};
if bitset == 0 {
return this.set_last_error_and_return(LibcError("EINVAL"), dest);
return ecx.set_last_error_and_return(LibcError("EINVAL"), dest);
}
// Together with the SeqCst fence in futex_wait, this makes sure that futex_wait
// will see the latest value on addr which could be changed by our caller
// before doing the syscall.
this.atomic_fence(AtomicFenceOrd::SeqCst)?;
ecx.atomic_fence(AtomicFenceOrd::SeqCst)?;
let mut n = 0;
#[expect(clippy::arithmetic_side_effects)]
for _ in 0..val {
if this.futex_wake(&futex_ref, bitset)? {
if ecx.futex_wake(&futex_ref, bitset)? {
n += 1;
} else {
break;
}
}
this.write_scalar(Scalar::from_target_isize(n, this), dest)?;
ecx.write_scalar(Scalar::from_target_isize(n, ecx), dest)?;
}
op => throw_unsup_format!("Miri does not support `futex` syscall with op={}", op),
}
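
As a reminder of the contract this shim models: FUTEX_WAIT blocks only while `*addr` still equals the expected value (failing with EAGAIN otherwise), and FUTEX_WAKE reports how many waiters were woken. A sketch of the compare-and-fail path (hypothetical guest code, Linux + `libc` assumed):

use std::ptr;
use std::sync::atomic::AtomicI32;

fn main() {
    let futex_word = AtomicI32::new(1);
    // The expected value (0) does not match the stored value (1), so the
    // syscall returns immediately with EAGAIN instead of sleeping.
    let res = unsafe {
        libc::syscall(
            libc::SYS_futex,
            &futex_word as *const AtomicI32,
            libc::FUTEX_WAIT,
            0_i32,
            ptr::null::<libc::timespec>(),
        )
    };
    assert_eq!(res, -1);
    assert_eq!(std::io::Error::last_os_error().raw_os_error(), Some(libc::EAGAIN));
}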

View File

@ -7,7 +7,7 @@ use crate::shims::unix::linux::sync::futex;
use crate::*;
pub fn syscall<'tcx>(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
link_name: Symbol,
abi: ExternAbi,
args: &[OpTy<'tcx>],
@ -15,18 +15,18 @@ pub fn syscall<'tcx>(
) -> InterpResult<'tcx> {
// We do not use `check_shim` here because `syscall` is variadic. The argument
// count is checked below.
this.check_abi_and_shim_symbol_clash(abi, ExternAbi::C { unwind: false }, link_name)?;
ecx.check_abi_and_shim_symbol_clash(abi, ExternAbi::C { unwind: false }, link_name)?;
// The syscall variadic function is legal to call with more arguments than needed,
// extra arguments are simply ignored. The important check is that when we use an
// argument, we have to also check all arguments *before* it to ensure that they
// have the right type.
let sys_getrandom = this.eval_libc("SYS_getrandom").to_target_usize(this)?;
let sys_futex = this.eval_libc("SYS_futex").to_target_usize(this)?;
let sys_eventfd2 = this.eval_libc("SYS_eventfd2").to_target_usize(this)?;
let sys_getrandom = ecx.eval_libc("SYS_getrandom").to_target_usize(ecx)?;
let sys_futex = ecx.eval_libc("SYS_futex").to_target_usize(ecx)?;
let sys_eventfd2 = ecx.eval_libc("SYS_eventfd2").to_target_usize(ecx)?;
let [op] = check_min_arg_count("syscall", args)?;
match this.read_target_usize(op)? {
match ecx.read_target_usize(op)? {
// `libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)`
// is called if a `HashMap` is created the regular way (e.g. HashMap<K, V>).
num if num == sys_getrandom => {
@ -34,25 +34,25 @@ pub fn syscall<'tcx>(
// The first argument is the syscall id, so skip over it.
let [_, ptr, len, flags] = check_min_arg_count("syscall(SYS_getrandom, ...)", args)?;
let ptr = this.read_pointer(ptr)?;
let len = this.read_target_usize(len)?;
let ptr = ecx.read_pointer(ptr)?;
let len = ecx.read_target_usize(len)?;
// The only supported flags are GRND_RANDOM and GRND_NONBLOCK,
// neither of which have any effect on our current PRNG.
// See <https://github.com/rust-lang/rust/pull/79196> for a discussion of argument sizes.
let _flags = this.read_scalar(flags)?.to_i32()?;
let _flags = ecx.read_scalar(flags)?.to_i32()?;
this.gen_random(ptr, len)?;
this.write_scalar(Scalar::from_target_usize(len, this), dest)?;
ecx.gen_random(ptr, len)?;
ecx.write_scalar(Scalar::from_target_usize(len, ecx), dest)?;
}
// `futex` is used by some synchronization primitives.
num if num == sys_futex => {
futex(this, args, dest)?;
futex(ecx, args, dest)?;
}
num if num == sys_eventfd2 => {
let [_, initval, flags] = check_min_arg_count("syscall(SYS_eventfd2, ...)", args)?;
let result = this.eventfd(initval, flags)?;
this.write_int(result.to_i32()?, dest)?;
let result = ecx.eventfd(initval, flags)?;
ecx.write_int(result.to_i32()?, dest)?;
}
num => {
throw_unsup_format!("syscall: unsupported syscall number {num}");

View File

@ -14,18 +14,21 @@ use rustc_abi::Size;
use crate::*;
#[derive(Copy, Clone)]
#[derive(Clone)]
enum MacOsUnfairLock {
Poisoned,
Active { id: MutexId },
Active { mutex_ref: MutexRef },
}
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_get_data(
&mut self,
fn os_unfair_lock_get_data<'a>(
&'a mut self,
lock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, MacOsUnfairLock> {
) -> InterpResult<'tcx, &'a MacOsUnfairLock>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
let lock = this.deref_pointer(lock_ptr)?;
this.lazy_sync_get_data(
@ -42,8 +45,8 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
interp_ok(MacOsUnfairLock::Poisoned)
},
|ecx| {
let id = ecx.machine.sync.mutex_create();
interp_ok(MacOsUnfairLock::Active { id })
let mutex_ref = ecx.machine.sync.mutex_create();
interp_ok(MacOsUnfairLock::Active { mutex_ref })
},
)
}
@ -54,7 +57,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// Trying to get a poisoned lock. Just block forever...
this.block_thread(
BlockReason::Sleep,
@ -68,18 +71,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
);
return interp_ok(());
};
let mutex_ref = mutex_ref.clone();
if this.mutex_is_locked(id) {
if this.mutex_get_owner(id) == this.active_thread() {
if this.mutex_is_locked(&mutex_ref) {
if this.mutex_get_owner(&mutex_ref) == this.active_thread() {
// Matching the current macOS implementation: abort on reentrant locking.
throw_machine_stop!(TerminationInfo::Abort(
"attempted to lock an os_unfair_lock that is already locked by the current thread".to_owned()
));
}
this.mutex_enqueue_and_block(id, None);
this.mutex_enqueue_and_block(&mutex_ref, None);
} else {
this.mutex_lock(id);
this.mutex_lock(&mutex_ref);
}
interp_ok(())
@ -92,18 +96,19 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// Trying to get a poisoned lock. That never works.
this.write_scalar(Scalar::from_bool(false), dest)?;
return interp_ok(());
};
let mutex_ref = mutex_ref.clone();
if this.mutex_is_locked(id) {
if this.mutex_is_locked(&mutex_ref) {
// Contrary to the blocking lock function, this does not check for
// reentrancy.
this.write_scalar(Scalar::from_bool(false), dest)?;
} else {
this.mutex_lock(id);
this.mutex_lock(&mutex_ref);
this.write_scalar(Scalar::from_bool(true), dest)?;
}
@ -113,15 +118,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
throw_machine_stop!(TerminationInfo::Abort(
"attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
));
};
let mutex_ref = mutex_ref.clone();
// Now, unlock.
if this.mutex_unlock(id)?.is_none() {
if this.mutex_unlock(&mutex_ref)?.is_none() {
// Matching the current macOS implementation: abort.
throw_machine_stop!(TerminationInfo::Abort(
"attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
@ -130,7 +136,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// If the lock is not locked by anyone now, it went quiet.
// Reset to zero so that it can be moved and initialized again for the next phase.
if !this.mutex_is_locked(id) {
if !this.mutex_is_locked(&mutex_ref) {
let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
}
@ -141,13 +147,17 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
throw_machine_stop!(TerminationInfo::Abort(
"called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
));
};
if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
let mutex_ref = mutex_ref.clone();
if !this.mutex_is_locked(&mutex_ref)
|| this.mutex_get_owner(&mutex_ref) != this.active_thread()
{
throw_machine_stop!(TerminationInfo::Abort(
"called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
));
@ -161,11 +171,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
let MacOsUnfairLock::Active { mutex_ref } = this.os_unfair_lock_get_data(lock_op)? else {
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
return interp_ok(());
};
if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
let mutex_ref = mutex_ref.clone();
if this.mutex_is_locked(&mutex_ref)
&& this.mutex_get_owner(&mutex_ref) == this.active_thread()
{
throw_machine_stop!(TerminationInfo::Abort(
"called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
));
@ -173,7 +187,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// If the lock is not locked by anyone now, it went quiet.
// Reset to zero so that it can be moved and initialized again for the next phase.
if !this.mutex_is_locked(id) {
if !this.mutex_is_locked(&mutex_ref) {
let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
}

View File

@ -116,9 +116,9 @@ enum MutexKind {
ErrorCheck,
}
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone)]
struct PthreadMutex {
id: MutexId,
mutex_ref: MutexRef,
kind: MutexKind,
}
@ -175,19 +175,20 @@ fn mutex_create<'tcx>(
) -> InterpResult<'tcx, PthreadMutex> {
let mutex = ecx.deref_pointer(mutex_ptr)?;
let id = ecx.machine.sync.mutex_create();
let data = PthreadMutex { id, kind };
ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data)?;
let data = PthreadMutex { mutex_ref: id, kind };
ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data.clone())?;
interp_ok(data)
}
/// Returns the `MutexId` of the mutex stored at `mutex_op`.
///
/// `mutex_get_id` will also check if the mutex has been moved since its first use and
/// return an error if it has.
/// Returns the mutex data stored at the address that `mutex_ptr` points to.
/// Will raise an error if the mutex has been moved since its first use.
fn mutex_get_data<'tcx, 'a>(
ecx: &'a mut MiriInterpCx<'tcx>,
mutex_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, PthreadMutex> {
) -> InterpResult<'tcx, &'a PthreadMutex>
where
'tcx: 'a,
{
let mutex = ecx.deref_pointer(mutex_ptr)?;
ecx.lazy_sync_get_data(
&mutex,
@ -196,7 +197,7 @@ fn mutex_get_data<'tcx, 'a>(
|ecx| {
let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
let id = ecx.machine.sync.mutex_create();
interp_ok(PthreadMutex { id, kind })
interp_ok(PthreadMutex { mutex_ref: id, kind })
},
)
}
@ -261,10 +262,13 @@ fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size
interp_ok(offset)
}
fn rwlock_get_data<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
fn rwlock_get_data<'tcx, 'a>(
ecx: &'a mut MiriInterpCx<'tcx>,
rwlock_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, PthreadRwLock> {
) -> InterpResult<'tcx, &'a PthreadRwLock>
where
'tcx: 'a,
{
let rwlock = ecx.deref_pointer(rwlock_ptr)?;
ecx.lazy_sync_get_data(
&rwlock,
@ -391,10 +395,13 @@ fn cond_create<'tcx>(
interp_ok(data)
}
fn cond_get_data<'tcx>(
ecx: &mut MiriInterpCx<'tcx>,
fn cond_get_data<'tcx, 'a>(
ecx: &'a mut MiriInterpCx<'tcx>,
cond_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, PthreadCondvar> {
) -> InterpResult<'tcx, &'a PthreadCondvar>
where
'tcx: 'a,
{
let cond = ecx.deref_pointer(cond_ptr)?;
ecx.lazy_sync_get_data(
&cond,
@ -500,12 +507,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?;
let mutex = mutex_get_data(this, mutex_op)?.clone();
let ret = if this.mutex_is_locked(mutex.id) {
let owner_thread = this.mutex_get_owner(mutex.id);
let ret = if this.mutex_is_locked(&mutex.mutex_ref) {
let owner_thread = this.mutex_get_owner(&mutex.mutex_ref);
if owner_thread != this.active_thread() {
this.mutex_enqueue_and_block(mutex.id, Some((Scalar::from_i32(0), dest.clone())));
this.mutex_enqueue_and_block(
&mutex.mutex_ref,
Some((Scalar::from_i32(0), dest.clone())),
);
return interp_ok(());
} else {
// Trying to acquire the same mutex again.
@ -517,14 +527,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
MutexKind::Recursive => {
this.mutex_lock(mutex.id);
this.mutex_lock(&mutex.mutex_ref);
0
}
}
}
} else {
// The mutex is unlocked. Let's lock it.
this.mutex_lock(mutex.id);
this.mutex_lock(&mutex.mutex_ref);
0
};
this.write_scalar(Scalar::from_i32(ret), dest)?;
@ -534,10 +544,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?;
let mutex = mutex_get_data(this, mutex_op)?.clone();
interp_ok(Scalar::from_i32(if this.mutex_is_locked(mutex.id) {
let owner_thread = this.mutex_get_owner(mutex.id);
interp_ok(Scalar::from_i32(if this.mutex_is_locked(&mutex.mutex_ref) {
let owner_thread = this.mutex_get_owner(&mutex.mutex_ref);
if owner_thread != this.active_thread() {
this.eval_libc_i32("EBUSY")
} else {
@ -545,14 +555,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
this.eval_libc_i32("EBUSY"),
MutexKind::Recursive => {
this.mutex_lock(mutex.id);
this.mutex_lock(&mutex.mutex_ref);
0
}
}
}
} else {
// The mutex is unlocked. Let's lock it.
this.mutex_lock(mutex.id);
this.mutex_lock(&mutex.mutex_ref);
0
}))
}
@ -560,9 +570,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();
let mutex = mutex_get_data(this, mutex_op)?;
let mutex = mutex_get_data(this, mutex_op)?.clone();
if let Some(_old_locked_count) = this.mutex_unlock(mutex.id)? {
if let Some(_old_locked_count) = this.mutex_unlock(&mutex.mutex_ref)? {
// The mutex was locked by the current thread.
interp_ok(Scalar::from_i32(0))
} else {
@ -588,10 +598,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
// Reading the field also has the side-effect that we detect double-`destroy`
// since we make the field unint below.
let mutex = mutex_get_data(this, mutex_op)?;
// since we make the field uninit below.
let mutex = mutex_get_data(this, mutex_op)?.clone();
if this.mutex_is_locked(mutex.id) {
if this.mutex_is_locked(&mutex.mutex_ref) {
throw_ub_format!("destroyed a locked mutex");
}
@ -696,7 +706,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
// Reading the field also has the side-effect that we detect double-`destroy`
// since we make the field unint below.
// since we make the field uninit below.
let id = rwlock_get_data(this, rwlock_op)?.id;
if this.rwlock_is_locked(id) {
@ -821,12 +831,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let data = cond_get_data(this, cond_op)?;
let mutex_id = mutex_get_data(this, mutex_op)?.id;
let data = *cond_get_data(this, cond_op)?;
let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
this.condvar_wait(
data.id,
mutex_id,
mutex_ref,
None, // no timeout
Scalar::from_i32(0),
Scalar::from_i32(0), // retval_timeout -- unused
@ -845,8 +855,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let data = cond_get_data(this, cond_op)?;
let mutex_id = mutex_get_data(this, mutex_op)?.id;
let data = *cond_get_data(this, cond_op)?;
let mutex_ref = mutex_get_data(this, mutex_op)?.mutex_ref.clone();
// Extract the timeout.
let duration = match this
@ -869,7 +879,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
this.condvar_wait(
data.id,
mutex_id,
mutex_ref,
Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
Scalar::from_i32(0),
this.eval_libc("ETIMEDOUT"), // retval_timeout
@ -883,7 +893,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
// Reading the field also has the side-effect that we detect double-`destroy`
// since we make the field unint below.
// since we make the field uninit below.
let id = cond_get_data(this, cond_op)?.id;
if this.condvar_is_awaited(id) {
throw_ub_format!("destroying an awaited conditional variable");

View File

@ -7,6 +7,7 @@ use rustc_span::Symbol;
use self::shims::windows::handle::{Handle, PseudoHandle};
use crate::shims::os_str::bytes_to_os_str;
use crate::shims::windows::handle::HandleError;
use crate::shims::windows::*;
use crate::*;
@ -488,7 +489,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let thread_id =
this.CreateThread(security, stacksize, start, arg, flags, thread)?;
this.write_scalar(Handle::Thread(thread_id.to_u32()).to_scalar(this), dest)?;
this.write_scalar(Handle::Thread(thread_id).to_scalar(this), dest)?;
}
"WaitForSingleObject" => {
let [handle, timeout] =
@ -513,10 +514,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let handle = this.read_scalar(handle)?;
let name = this.read_wide_str(this.read_pointer(name)?)?;
let thread = match Handle::from_scalar(handle, this)? {
Some(Handle::Thread(thread)) => this.thread_id_try_from(thread),
Some(Handle::Pseudo(PseudoHandle::CurrentThread)) => Ok(this.active_thread()),
_ => this.invalid_handle("SetThreadDescription")?,
let thread = match Handle::try_from_scalar(handle, this)? {
Ok(Handle::Thread(thread)) => Ok(thread),
Ok(Handle::Pseudo(PseudoHandle::CurrentThread)) => Ok(this.active_thread()),
Ok(_) | Err(HandleError::InvalidHandle) =>
this.invalid_handle("SetThreadDescription")?,
Err(HandleError::ThreadNotFound(e)) => Err(e),
};
let res = match thread {
Ok(thread) => {
@ -536,10 +539,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let handle = this.read_scalar(handle)?;
let name_ptr = this.deref_pointer(name_ptr)?; // the pointer where we should store the ptr to the name
let thread = match Handle::from_scalar(handle, this)? {
Some(Handle::Thread(thread)) => this.thread_id_try_from(thread),
Some(Handle::Pseudo(PseudoHandle::CurrentThread)) => Ok(this.active_thread()),
_ => this.invalid_handle("GetThreadDescription")?,
let thread = match Handle::try_from_scalar(handle, this)? {
Ok(Handle::Thread(thread)) => Ok(thread),
Ok(Handle::Pseudo(PseudoHandle::CurrentThread)) => Ok(this.active_thread()),
Ok(_) | Err(HandleError::InvalidHandle) =>
this.invalid_handle("GetThreadDescription")?,
Err(HandleError::ThreadNotFound(e)) => Err(e),
};
let (name, res) = match thread {
Ok(thread) => {

View File

@ -2,6 +2,7 @@ use std::mem::variant_count;
use rustc_abi::HasDataLayout;
use crate::concurrency::thread::ThreadNotFound;
use crate::*;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
@ -14,7 +15,7 @@ pub enum PseudoHandle {
pub enum Handle {
Null,
Pseudo(PseudoHandle),
Thread(u32),
Thread(ThreadId),
}
impl PseudoHandle {
@ -34,6 +35,14 @@ impl PseudoHandle {
}
}
/// Errors that can occur when constructing a [`Handle`] from a Scalar.
pub enum HandleError {
/// There is no thread with the given ID.
ThreadNotFound(ThreadNotFound),
/// Can't convert scalar to handle because it is structurally invalid.
InvalidHandle,
}
impl Handle {
const NULL_DISCRIMINANT: u32 = 0;
const PSEUDO_DISCRIMINANT: u32 = 1;
@ -51,7 +60,7 @@ impl Handle {
match self {
Self::Null => 0,
Self::Pseudo(pseudo_handle) => pseudo_handle.value(),
Self::Thread(thread) => thread,
Self::Thread(thread) => thread.to_u32(),
}
}
@ -95,7 +104,7 @@ impl Handle {
match discriminant {
Self::NULL_DISCRIMINANT if data == 0 => Some(Self::Null),
Self::PSEUDO_DISCRIMINANT => Some(Self::Pseudo(PseudoHandle::from_value(data)?)),
Self::THREAD_DISCRIMINANT => Some(Self::Thread(data)),
Self::THREAD_DISCRIMINANT => Some(Self::Thread(ThreadId::new_unchecked(data))),
_ => None,
}
}
@ -126,10 +135,14 @@ impl Handle {
Scalar::from_target_isize(signed_handle.into(), cx)
}
pub fn from_scalar<'tcx>(
/// Convert a scalar into a structured `Handle`.
/// Structurally invalid handles return [`HandleError::InvalidHandle`].
/// If the handle is structurally valid but semantically invalid, e.g. for a non-existent thread
/// ID, returns [`HandleError::ThreadNotFound`].
pub fn try_from_scalar<'tcx>(
handle: Scalar,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Option<Self>> {
cx: &MiriInterpCx<'tcx>,
) -> InterpResult<'tcx, Result<Self, HandleError>> {
let sign_extended_handle = handle.to_target_isize(cx)?;
#[expect(clippy::cast_sign_loss)] // we want to lose the sign
@ -137,10 +150,20 @@ impl Handle {
signed_handle as u32
} else {
// if a handle doesn't fit in an i32, it isn't valid.
return interp_ok(None);
return interp_ok(Err(HandleError::InvalidHandle));
};
interp_ok(Self::from_packed(handle))
match Self::from_packed(handle) {
Some(Self::Thread(thread)) => {
// validate the thread id
match cx.machine.threads.thread_id_try_from(thread.to_u32()) {
Ok(id) => interp_ok(Ok(Self::Thread(id))),
Err(e) => interp_ok(Err(HandleError::ThreadNotFound(e))),
}
}
Some(handle) => interp_ok(Ok(handle)),
None => interp_ok(Err(HandleError::InvalidHandle)),
}
}
}
@ -158,14 +181,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();
let handle = this.read_scalar(handle_op)?;
let ret = match Handle::from_scalar(handle, this)? {
Some(Handle::Thread(thread)) => {
if let Ok(thread) = this.thread_id_try_from(thread) {
this.detach_thread(thread, /*allow_terminated_joined*/ true)?;
this.eval_windows("c", "TRUE")
} else {
this.invalid_handle("CloseHandle")?
}
let ret = match Handle::try_from_scalar(handle, this)? {
Ok(Handle::Thread(thread)) => {
this.detach_thread(thread, /*allow_terminated_joined*/ true)?;
this.eval_windows("c", "TRUE")
}
_ => this.invalid_handle("CloseHandle")?,
};
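
The new `HandleError` gives callers a three-way outcome: a structurally valid handle, a malformed one, or one that is well-formed but names a thread that does not exist. A stand-in sketch of the same layering (hypothetical names, not the shim itself):

#[derive(Debug)]
enum HandleError {
    InvalidHandle,
    ThreadNotFound(u32),
}

fn try_from_raw(raw: i64, live_threads: &[u32]) -> Result<u32, HandleError> {
    // Structural check: the packed value must fit the handle encoding.
    let id = u32::try_from(raw).map_err(|_| HandleError::InvalidHandle)?;
    // Semantic check: the id must refer to a live thread.
    if live_threads.contains(&id) { Ok(id) } else { Err(HandleError::ThreadNotFound(id)) }
}

fn main() {
    let live = [1, 2, 3];
    assert!(matches!(try_from_raw(2, &live), Ok(2)));
    assert!(matches!(try_from_raw(9, &live), Err(HandleError::ThreadNotFound(9))));
    assert!(matches!(try_from_raw(-1, &live), Err(HandleError::InvalidHandle)));
}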

View File

@ -20,10 +20,13 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Windows sync primitives are pointer sized.
// We only use the first 4 bytes for the id.
fn init_once_get_data(
&mut self,
fn init_once_get_data<'a>(
&'a mut self,
init_once_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, WindowsInitOnce> {
) -> InterpResult<'tcx, &'a WindowsInitOnce>
where
'tcx: 'a,
{
let this = self.eval_context_mut();
let init_once = this.deref_pointer(init_once_ptr)?;

View File

@ -65,15 +65,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let handle = this.read_scalar(handle_op)?;
let timeout = this.read_scalar(timeout_op)?.to_u32()?;
let thread = match Handle::from_scalar(handle, this)? {
Some(Handle::Thread(thread)) =>
match this.thread_id_try_from(thread) {
Ok(thread) => thread,
Err(_) => this.invalid_handle("WaitForSingleObject")?,
},
let thread = match Handle::try_from_scalar(handle, this)? {
Ok(Handle::Thread(thread)) => thread,
// Unlike on POSIX, the outcome of joining the current thread is not documented.
// On current Windows, it just deadlocks.
Some(Handle::Pseudo(PseudoHandle::CurrentThread)) => this.active_thread(),
Ok(Handle::Pseudo(PseudoHandle::CurrentThread)) => this.active_thread(),
_ => this.invalid_handle("WaitForSingleObject")?,
};

View File

@ -132,7 +132,7 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Performs an AES round (given by `f`) on each 128-bit word of
// `state` with the corresponding 128-bit key of `key`.
fn aes_round<'tcx>(
this: &mut crate::MiriInterpCx<'tcx>,
ecx: &mut crate::MiriInterpCx<'tcx>,
state: &OpTy<'tcx>,
key: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
@ -145,21 +145,20 @@ fn aes_round<'tcx>(
assert_eq!(dest.layout.size.bytes() % 16, 0);
let len = dest.layout.size.bytes() / 16;
let u128_array_layout =
this.layout_of(Ty::new_array(this.tcx.tcx, this.tcx.types.u128, len))?;
let u128_array_layout = ecx.layout_of(Ty::new_array(ecx.tcx.tcx, ecx.tcx.types.u128, len))?;
let state = state.transmute(u128_array_layout, this)?;
let key = key.transmute(u128_array_layout, this)?;
let dest = dest.transmute(u128_array_layout, this)?;
let state = state.transmute(u128_array_layout, ecx)?;
let key = key.transmute(u128_array_layout, ecx)?;
let dest = dest.transmute(u128_array_layout, ecx)?;
for i in 0..len {
let state = this.read_scalar(&this.project_index(&state, i)?)?.to_u128()?;
let key = this.read_scalar(&this.project_index(&key, i)?)?.to_u128()?;
let dest = this.project_index(&dest, i)?;
let state = ecx.read_scalar(&ecx.project_index(&state, i)?)?.to_u128()?;
let key = ecx.read_scalar(&ecx.project_index(&key, i)?)?.to_u128()?;
let dest = ecx.project_index(&dest, i)?;
let res = f(state, key);
this.write_scalar(Scalar::from_u128(res), &dest)?;
ecx.write_scalar(Scalar::from_u128(res), &dest)?;
}
interp_ok(())

View File

@ -75,21 +75,21 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
/// If `inverse` is set, then the inverse transformation with respect to the reduction polynomial
/// x^8 + x^4 + x^3 + x + 1 is performed instead.
fn affine_transform<'tcx>(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
left: &OpTy<'tcx>,
right: &OpTy<'tcx>,
imm8: &OpTy<'tcx>,
dest: &MPlaceTy<'tcx>,
inverse: bool,
) -> InterpResult<'tcx, ()> {
let (left, left_len) = this.project_to_simd(left)?;
let (right, right_len) = this.project_to_simd(right)?;
let (dest, dest_len) = this.project_to_simd(dest)?;
let (left, left_len) = ecx.project_to_simd(left)?;
let (right, right_len) = ecx.project_to_simd(right)?;
let (dest, dest_len) = ecx.project_to_simd(dest)?;
assert_eq!(dest_len, right_len);
assert_eq!(dest_len, left_len);
let imm8 = this.read_scalar(imm8)?.to_u8()?;
let imm8 = ecx.read_scalar(imm8)?.to_u8()?;
// Each 8x8 bit matrix gets multiplied with eight bit vectors.
// Therefore, the iteration is done in chunks of eight.
@ -98,13 +98,13 @@ fn affine_transform<'tcx>(
let mut matrix = [0u8; 8];
for j in 0..8 {
matrix[usize::try_from(j).unwrap()] =
this.read_scalar(&this.project_index(&right, i.wrapping_add(j))?)?.to_u8()?;
ecx.read_scalar(&ecx.project_index(&right, i.wrapping_add(j))?)?.to_u8()?;
}
// Multiply the matrix with the vector and perform the addition.
for j in 0..8 {
let index = i.wrapping_add(j);
let left = this.read_scalar(&this.project_index(&left, index)?)?.to_u8()?;
let left = ecx.read_scalar(&ecx.project_index(&left, index)?)?.to_u8()?;
let left = if inverse { TABLE[usize::from(left)] } else { left };
let mut res = 0;
@ -124,8 +124,8 @@ fn affine_transform<'tcx>(
// Perform the addition.
res ^= imm8;
let dest = this.project_index(&dest, index)?;
this.write_scalar(Scalar::from_u8(res), &dest)?;
let dest = ecx.project_index(&dest, index)?;
ecx.write_scalar(Scalar::from_u8(res), &dest)?;
}
}
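
Concretely, the inner loops compute an affine map over GF(2) for each byte: eight dot products of a matrix row with the source byte, each reduced by XOR (i.e. parity), then a final XOR with `imm8`. A scalar sketch for a single byte (bit ordering simplified here; the real GFNI definition indexes rows from the most significant bit):

// res bit j is the parity of (matrix[j] & x); the byte is then XORed with imm8.
fn affine_byte(matrix: [u8; 8], x: u8, imm8: u8) -> u8 {
    let mut res = 0_u8;
    for (j, row) in matrix.iter().enumerate() {
        let parity = ((row & x).count_ones() & 1) as u8;
        res |= parity << j;
    }
    res ^ imm8
}

fn main() {
    // With the identity matrix (one bit per row) the transform reduces to x ^ imm8.
    let identity = [1, 2, 4, 8, 16, 32, 64, 128];
    assert_eq!(affine_byte(identity, 0xA5, 0x0F), 0xA5 ^ 0x0F);
}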

File diff suppressed because it is too large

View File

@ -23,27 +23,27 @@ pub(super) trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Prefix should have already been checked.
let unprefixed_name = link_name.as_str().strip_prefix("llvm.x86.sha").unwrap();
fn read<'c>(this: &mut MiriInterpCx<'c>, reg: &OpTy<'c>) -> InterpResult<'c, [u32; 4]> {
fn read<'c>(ecx: &mut MiriInterpCx<'c>, reg: &OpTy<'c>) -> InterpResult<'c, [u32; 4]> {
let mut res = [0; 4];
// We reverse the order because x86 is little endian but the copied implementation uses
// big endian.
for (i, dst) in res.iter_mut().rev().enumerate() {
let projected = &this.project_index(reg, i.try_into().unwrap())?;
*dst = this.read_scalar(projected)?.to_u32()?
let projected = &ecx.project_index(reg, i.try_into().unwrap())?;
*dst = ecx.read_scalar(projected)?.to_u32()?
}
interp_ok(res)
}
fn write<'c>(
this: &mut MiriInterpCx<'c>,
ecx: &mut MiriInterpCx<'c>,
dest: &MPlaceTy<'c>,
val: [u32; 4],
) -> InterpResult<'c, ()> {
// We reverse the order because x86 is little endian but the copied implementation uses
// big endian.
for (i, part) in val.into_iter().rev().enumerate() {
let projected = &this.project_index(dest, i.try_into().unwrap())?;
this.write_scalar(Scalar::from_u32(part), projected)?;
let projected = &ecx.project_index(dest, i.try_into().unwrap())?;
ecx.write_scalar(Scalar::from_u32(part), projected)?;
}
interp_ok(())
}
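
The reversal in `read` and `write` adapts between x86's little-endian lane order and the big-endian word order used by the reference SHA implementation; the effect, in isolation:

fn main() {
    // Lane order [w0, w1, w2, w3] becomes word order [w3, w2, w1, w0] on the
    // way in, and is reversed again on the way out.
    let lanes: [u32; 4] = [0, 1, 2, 3];
    let mut words = [0_u32; 4];
    for (dst, src) in words.iter_mut().rev().zip(lanes.iter()) {
        *dst = *src;
    }
    assert_eq!(words, [3, 2, 1, 0]);
}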

View File

@ -70,7 +70,7 @@ const USE_SIGNED: u8 = 2;
/// For more information, see the Intel Software Developer's Manual, Vol. 2b, Chapter 4.1.
#[expect(clippy::arithmetic_side_effects)]
fn compare_strings<'tcx>(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
str1: &OpTy<'tcx>,
str2: &OpTy<'tcx>,
len: Option<(u64, u64)>,
@ -80,8 +80,8 @@ fn compare_strings<'tcx>(
let (len1, len2) = if let Some(t) = len {
t
} else {
let len1 = implicit_len(this, str1, imm)?.unwrap_or(default_len);
let len2 = implicit_len(this, str2, imm)?.unwrap_or(default_len);
let len1 = implicit_len(ecx, str1, imm)?.unwrap_or(default_len);
let len2 = implicit_len(ecx, str2, imm)?.unwrap_or(default_len);
(len1, len2)
};
@ -90,12 +90,12 @@ fn compare_strings<'tcx>(
0 => {
// Equal any: Checks which characters of `str2` are inside `str1`.
for i in 0..len2 {
let ch2 = this.read_immediate(&this.project_index(str2, i)?)?;
let ch2 = ecx.read_immediate(&ecx.project_index(str2, i)?)?;
for j in 0..len1 {
let ch1 = this.read_immediate(&this.project_index(str1, j)?)?;
let ch1 = ecx.read_immediate(&ecx.project_index(str1, j)?)?;
let eq = this.binary_op(mir::BinOp::Eq, &ch1, &ch2)?;
let eq = ecx.binary_op(mir::BinOp::Eq, &ch1, &ch2)?;
if eq.to_scalar().to_bool()? {
result |= 1 << i;
break;
@ -119,9 +119,9 @@ fn compare_strings<'tcx>(
for i in 0..len2 {
for j in (0..len1).step_by(2) {
let ch2 = get_ch(this.read_scalar(&this.project_index(str2, i)?)?)?;
let ch1_1 = get_ch(this.read_scalar(&this.project_index(str1, j)?)?)?;
let ch1_2 = get_ch(this.read_scalar(&this.project_index(str1, j + 1)?)?)?;
let ch2 = get_ch(ecx.read_scalar(&ecx.project_index(str2, i)?)?)?;
let ch1_1 = get_ch(ecx.read_scalar(&ecx.project_index(str1, j)?)?)?;
let ch1_2 = get_ch(ecx.read_scalar(&ecx.project_index(str1, j + 1)?)?)?;
if ch1_1 <= ch2 && ch2 <= ch1_2 {
result |= 1 << i;
@ -135,9 +135,9 @@ fn compare_strings<'tcx>(
result ^= (1 << len1.max(len2)) - 1;
for i in 0..len1.min(len2) {
let ch1 = this.read_immediate(&this.project_index(str1, i)?)?;
let ch2 = this.read_immediate(&this.project_index(str2, i)?)?;
let eq = this.binary_op(mir::BinOp::Eq, &ch1, &ch2)?;
let ch1 = ecx.read_immediate(&ecx.project_index(str1, i)?)?;
let ch2 = ecx.read_immediate(&ecx.project_index(str2, i)?)?;
let eq = ecx.binary_op(mir::BinOp::Eq, &ch1, &ch2)?;
result |= i32::from(eq.to_scalar().to_bool()?) << i;
}
}
@ -159,9 +159,9 @@ fn compare_strings<'tcx>(
if k >= default_len {
break;
} else {
let ch1 = this.read_immediate(&this.project_index(str1, j)?)?;
let ch2 = this.read_immediate(&this.project_index(str2, k)?)?;
let ne = this.binary_op(mir::BinOp::Ne, &ch1, &ch2)?;
let ch1 = ecx.read_immediate(&ecx.project_index(str1, j)?)?;
let ch2 = ecx.read_immediate(&ecx.project_index(str2, k)?)?;
let ne = ecx.binary_op(mir::BinOp::Ne, &ch1, &ch2)?;
if ne.to_scalar().to_bool()? {
result &= !(1 << i);
@ -198,16 +198,16 @@ fn compare_strings<'tcx>(
/// corresponding to the x86 128-bit integer SIMD type.
fn deconstruct_args<'tcx>(
unprefixed_name: &str,
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
link_name: Symbol,
abi: ExternAbi,
args: &[OpTy<'tcx>],
) -> InterpResult<'tcx, (OpTy<'tcx>, OpTy<'tcx>, Option<(u64, u64)>, u8)> {
let array_layout_fn = |this: &mut MiriInterpCx<'tcx>, imm: u8| {
let array_layout_fn = |ecx: &mut MiriInterpCx<'tcx>, imm: u8| {
if imm & USE_WORDS != 0 {
this.layout_of(Ty::new_array(this.tcx.tcx, this.tcx.types.u16, 8))
ecx.layout_of(Ty::new_array(ecx.tcx.tcx, ecx.tcx.types.u16, 8))
} else {
this.layout_of(Ty::new_array(this.tcx.tcx, this.tcx.types.u8, 16))
ecx.layout_of(Ty::new_array(ecx.tcx.tcx, ecx.tcx.types.u8, 16))
}
};
@ -223,26 +223,26 @@ fn deconstruct_args<'tcx>(
if is_explicit {
let [str1, len1, str2, len2, imm] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let imm = this.read_scalar(imm)?.to_u8()?;
ecx.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let imm = ecx.read_scalar(imm)?.to_u8()?;
let default_len = default_len::<u32>(imm);
let len1 = u64::from(this.read_scalar(len1)?.to_u32()?.min(default_len));
let len2 = u64::from(this.read_scalar(len2)?.to_u32()?.min(default_len));
let len1 = u64::from(ecx.read_scalar(len1)?.to_u32()?.min(default_len));
let len2 = u64::from(ecx.read_scalar(len2)?.to_u32()?.min(default_len));
let array_layout = array_layout_fn(this, imm)?;
let str1 = str1.transmute(array_layout, this)?;
let str2 = str2.transmute(array_layout, this)?;
let array_layout = array_layout_fn(ecx, imm)?;
let str1 = str1.transmute(array_layout, ecx)?;
let str2 = str2.transmute(array_layout, ecx)?;
interp_ok((str1, str2, Some((len1, len2)), imm))
} else {
let [str1, str2, imm] =
this.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let imm = this.read_scalar(imm)?.to_u8()?;
ecx.check_shim(abi, ExternAbi::C { unwind: false }, link_name, args)?;
let imm = ecx.read_scalar(imm)?.to_u8()?;
let array_layout = array_layout_fn(this, imm)?;
let str1 = str1.transmute(array_layout, this)?;
let str2 = str2.transmute(array_layout, this)?;
let array_layout = array_layout_fn(ecx, imm)?;
let str1 = str1.transmute(array_layout, ecx)?;
let str2 = str2.transmute(array_layout, ecx)?;
interp_ok((str1, str2, None, imm))
}
@ -251,16 +251,16 @@ fn deconstruct_args<'tcx>(
/// Calculate the c-style string length for a given string `str`.
/// The string is either a length-16 array of bytes or a length-8 array of two-byte words.
fn implicit_len<'tcx>(
this: &mut MiriInterpCx<'tcx>,
ecx: &mut MiriInterpCx<'tcx>,
str: &OpTy<'tcx>,
imm: u8,
) -> InterpResult<'tcx, Option<u64>> {
let mut result = None;
let zero = ImmTy::from_int(0, str.layout.field(this, 0));
let zero = ImmTy::from_int(0, str.layout.field(ecx, 0));
for i in 0..default_len::<u64>(imm) {
let ch = this.read_immediate(&this.project_index(str, i)?)?;
let is_zero = this.binary_op(mir::BinOp::Eq, &ch, &zero)?;
let ch = ecx.read_immediate(&ecx.project_index(str, i)?)?;
let is_zero = ecx.binary_op(mir::BinOp::Eq, &ch, &zero)?;
if is_zero.to_scalar().to_bool()? {
result = Some(i);
break;
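
`implicit_len` mirrors the implicit-length mode of the PCMPISTR* family: scan the fixed-width register for the first zero element and treat everything before it as the string. A scalar equivalent for the byte case (illustration only):

// Index of the first NUL byte within the 16-byte window, if any.
fn implicit_len(reg: &[u8; 16]) -> Option<u64> {
    reg.iter().position(|&b| b == 0).map(|i| i as u64)
}

fn main() {
    let mut s = [0_u8; 16];
    s[..5].copy_from_slice(b"hello");
    assert_eq!(implicit_len(&s), Some(5));
    assert_eq!(implicit_len(&[1; 16]), None);
}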

View File

@ -0,0 +1,65 @@
//@only-target: linux
//~^ERROR: deadlocked
//~^^ERROR: deadlocked
//@compile-flags: -Zmiri-preemption-rate=0
//@error-in-other-file: deadlock
use std::thread;
// Test the behaviour of a thread that blocks on an eventfd read, gets unblocked, and then
// gets blocked again.
// The expected execution is
// 1. Thread 1 blocks.
// 2. Thread 2 blocks.
// 3. Thread 3 unblocks both thread 1 and thread 2.
// 4. Thread 1 reads.
// 5. Thread 2's `read` deadlocks.
fn main() {
// eventfd read will block when the EFD_NONBLOCK flag is clear
// and the counter is 0.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let thread1 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This read will block initially.
let res: i64 = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8).try_into().unwrap() };
assert_eq!(res, 8);
let counter = u64::from_ne_bytes(buf);
assert_eq!(counter, 1_u64);
});
let thread2 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This read will block initially, then get unblocked by thread3, then get blocked again
// because the `read` in thread1 executes first and sets the counter to 0 again.
let res: i64 = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8).try_into().unwrap() };
//~^ERROR: deadlocked
assert_eq!(res, 8);
let counter = u64::from_ne_bytes(buf);
assert_eq!(counter, 1_u64);
});
let thread3 = thread::spawn(move || {
thread::park();
let sized_8_data = 1_u64.to_ne_bytes();
// Write 1 to the counter, so both thread1 and thread2 will unblock.
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
// Make sure that write is successful.
assert_eq!(res, 8);
});
thread1.thread().unpark();
thread2.thread().unpark();
thread3.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();
}

View File

@ -0,0 +1,41 @@
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> tests/fail-dep/libc/eventfd_block_read_twice.rs:LL:CC
|
LL | thread2.join().unwrap();
| ^^^^^^^^^^^^^^
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
error: deadlock: the evaluated program deadlocked
--> tests/fail-dep/libc/eventfd_block_read_twice.rs:LL:CC
|
LL | let res: i64 = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8).try_into().unwrap() };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at tests/fail-dep/libc/eventfd_block_read_twice.rs:LL:CC
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 4 previous errors

View File

@ -0,0 +1,71 @@
//@only-target: linux
//~^ERROR: deadlocked
//~^^ERROR: deadlocked
//@compile-flags: -Zmiri-preemption-rate=0
//@error-in-other-file: deadlock
use std::thread;
// Test the behaviour of a thread that blocks on an eventfd `write`, gets unblocked, and then
// gets blocked again.
// The expected execution is
// 1. Thread 1 blocks.
// 2. Thread 2 blocks.
// 3. Thread 3 unblocks both thread 1 and thread 2.
// 4. Thread 1 writes u64::MAX.
// 5. Thread 2's `write` deadlocks.
fn main() {
// eventfd write will block when the EFD_NONBLOCK flag is clear
// and the addition causes the counter to exceed u64::MAX - 1.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
// Write u64::MAX - 1, so all subsequent writes will block.
let sized_8_data: [u8; 8] = (u64::MAX - 1).to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
assert_eq!(res, 8);
let thread1 = thread::spawn(move || {
thread::park();
let sized_8_data = (u64::MAX - 1).to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
// Make sure that write is successful.
assert_eq!(res, 8);
});
let thread2 = thread::spawn(move || {
thread::park();
let sized_8_data = (u64::MAX - 1).to_ne_bytes();
// Write u64::MAX - 1, so all subsequent writes will block.
let res: i64 = unsafe {
// This `write` will initially block, then get unblocked by thread3, then get blocked again
// because the `write` in thread1 executes first.
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
//~^ERROR: deadlocked
};
// Make sure that write is successful.
assert_eq!(res, 8);
});
let thread3 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
// This will unblock both `write` in thread1 and thread2.
let res: i64 = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8).try_into().unwrap() };
assert_eq!(res, 8);
let counter = u64::from_ne_bytes(buf);
assert_eq!(counter, (u64::MAX - 1));
});
thread1.thread().unpark();
thread2.thread().unpark();
thread3.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();
}

View File

@ -0,0 +1,41 @@
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> tests/fail-dep/libc/eventfd_block_write_twice.rs:LL:CC
|
LL | thread2.join().unwrap();
| ^^^^^^^^^^^^^^
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
error: deadlock: the evaluated program deadlocked
--> tests/fail-dep/libc/eventfd_block_write_twice.rs:LL:CC
|
LL | libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
| ^ the evaluated program deadlocked
|
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at tests/fail-dep/libc/eventfd_block_write_twice.rs:LL:CC
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 4 previous errors

View File

@ -0,0 +1,96 @@
//@compile-flags: -Zmiri-preemption-rate=0
//~^ERROR: deadlocked
//~^^ERROR: deadlocked
//@only-target: linux
//@error-in-other-file: deadlock
use std::convert::TryInto;
use std::thread;
use std::thread::spawn;
// Using `as` cast since `EPOLLET` wraps around
const EPOLL_IN_OUT_ET: u32 = (libc::EPOLLIN | libc::EPOLLOUT | libc::EPOLLET) as _;
#[track_caller]
fn check_epoll_wait<const N: usize>(
epfd: i32,
expected_notifications: &[(u32, u64)],
timeout: i32,
) {
let epoll_event = libc::epoll_event { events: 0, u64: 0 };
let mut array: [libc::epoll_event; N] = [epoll_event; N];
let maxsize = N;
let array_ptr = array.as_mut_ptr();
let res = unsafe { libc::epoll_wait(epfd, array_ptr, maxsize.try_into().unwrap(), timeout) };
if res < 0 {
panic!("epoll_wait failed: {}", std::io::Error::last_os_error());
}
assert_eq!(
res,
expected_notifications.len().try_into().unwrap(),
"got wrong number of notifications"
);
let slice = unsafe { std::slice::from_raw_parts(array_ptr, res.try_into().unwrap()) };
for (return_event, expected_event) in slice.iter().zip(expected_notifications.iter()) {
let event = return_event.events;
let data = return_event.u64;
assert_eq!(event, expected_event.0, "got wrong events");
assert_eq!(data, expected_event.1, "got wrong data");
}
}
// Test that only one thread is unblocked when multiple threads are blocked on the same epfd.
// Expected execution:
// 1. Thread 2 blocks.
// 2. Thread 3 blocks.
// 3. Thread 1 unblocks thread 3.
// 4. Thread 2 deadlocks.
fn main() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);
// Create a socketpair instance.
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
// Register one side of the socketpair with epoll.
let mut ev = libc::epoll_event { events: EPOLL_IN_OUT_ET, u64: fds[0] as u64 };
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_ADD, fds[0], &mut ev) };
assert_eq!(res, 0);
    // Call epoll_wait to clear the initial notification.
let expected_event = u32::try_from(libc::EPOLLOUT).unwrap();
let expected_value = fds[0] as u64;
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], 0);
let thread1 = spawn(move || {
thread::park();
let data = "abcde".as_bytes().as_ptr();
let res = unsafe { libc::write(fds[1], data as *const libc::c_void, 5) };
assert_eq!(res, 5);
});
let expected_event = u32::try_from(libc::EPOLLIN | libc::EPOLLOUT).unwrap();
let expected_value = fds[0] as u64;
let thread2 = spawn(move || {
thread::park();
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], -1);
//~^ERROR: deadlocked
});
let thread3 = spawn(move || {
thread::park();
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)], -1);
});
thread2.thread().unpark();
thread::yield_now();
thread3.thread().unpark();
thread::yield_now();
thread1.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();
}
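// Not part of this patch, a sketch of the timeout contrast this test relies on:
// a timeout of -1 makes epoll_wait block until an event arrives, while a
// timeout of 0 polls and returns immediately even when nothing is ready.
#[allow(dead_code)]
fn poll_without_blocking(epfd: i32) {
    let mut ev = libc::epoll_event { events: 0, u64: 0 };
    // With timeout 0 this returns at once; a result of 0 means "nothing ready yet".
    let res = unsafe { libc::epoll_wait(epfd, &mut ev, 1, 0) };
    assert!(res >= 0, "epoll_wait failed: {}", std::io::Error::last_os_error());
}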

View File

@ -0,0 +1,41 @@
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
|
LL | let ret = unsafe { libc::pthread_join(id, ptr::null_mut()) };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `std::sys::pal::PLATFORM::thread::Thread::join` at RUSTLIB/std/src/sys/pal/PLATFORM/thread.rs:LL:CC
= note: inside `std::thread::JoinInner::<'_, ()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
= note: inside `std::thread::JoinHandle::<()>::join` at RUSTLIB/std/src/thread/mod.rs:LL:CC
note: inside `main`
--> tests/fail-dep/libc/libc_epoll_block_two_thread.rs:LL:CC
|
LL | thread2.join().unwrap();
| ^^^^^^^^^^^^^^
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
error: deadlock: the evaluated program deadlocked
--> tests/fail-dep/libc/libc_epoll_block_two_thread.rs:LL:CC
|
LL | check_epoll_wait::<TAG>(epfd, &[(expected_event, expected_value)], -1);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the evaluated program deadlocked
|
= note: BACKTRACE on thread `unnamed-ID`:
= note: inside closure at tests/fail-dep/libc/libc_epoll_block_two_thread.rs:LL:CC
error: deadlock: the evaluated program deadlocked
|
= note: the evaluated program deadlocked
= note: (no span available)
= note: BACKTRACE on thread `unnamed-ID`:
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 4 previous errors

View File

@ -1,11 +0,0 @@
//@only-target: linux
fn main() {
// eventfd read will block when EFD_NONBLOCK flag is clear and counter = 0.
// This will pass when blocking is implemented.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let mut buf: [u8; 8] = [0; 8];
let _res: i32 = unsafe {
libc::read(fd, buf.as_mut_ptr().cast(), buf.len() as libc::size_t).try_into().unwrap() //~ERROR: blocking is unsupported
};
}

View File

@ -1,14 +0,0 @@
error: unsupported operation: eventfd: blocking is unsupported
--> tests/fail-dep/libc/libc_eventfd_read_block.rs:LL:CC
|
LL | libc::read(fd, buf.as_mut_ptr().cast(), buf.len() as libc::size_t).try_into().unwrap()
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ eventfd: blocking is unsupported
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that Miri does not support
= note: BACKTRACE:
= note: inside `main` at tests/fail-dep/libc/libc_eventfd_read_block.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View File

@ -1,21 +0,0 @@
//@only-target: linux
fn main() {
// eventfd write will block when EFD_NONBLOCK flag is clear
// and the addition caused counter to exceed u64::MAX - 1.
// This will pass when blocking is implemented.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
    // Write u64::MAX - 1.
let mut sized_8_data: [u8; 8] = (u64::MAX - 1).to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
assert_eq!(res, 8);
// Write 1.
sized_8_data = 1_u64.to_ne_bytes();
// Write 1 to the counter.
let _res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap() //~ERROR: blocking is unsupported
};
}

View File

@ -1,14 +0,0 @@
error: unsupported operation: eventfd: blocking is unsupported
--> tests/fail-dep/libc/libc_eventfd_write_block.rs:LL:CC
|
LL | libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ eventfd: blocking is unsupported
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that Miri does not support
= note: BACKTRACE:
= note: inside `main` at tests/fail-dep/libc/libc_eventfd_write_block.rs:LL:CC
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
error: aborting due to 1 previous error

View File

@ -8,8 +8,8 @@
struct Foo(u64);
impl Foo {
#[rustfmt::skip] // rustfmt is wrong about which line contains an error
fn add(&mut self, n: u64) -> u64 { //~ ERROR: /reborrow through .* is forbidden/
fn add(&mut self, n: u64) -> u64 {
//~^ ERROR: /reborrow through .* is forbidden/
self.0 + n
}
}

View File

@ -1,5 +1,5 @@
//@only-target: linux
// test_race depends on a deterministic schedule.
// test_race, test_blocking_read and test_blocking_write depend on a deterministic schedule.
//@compile-flags: -Zmiri-preemption-rate=0
// FIXME(static_mut_refs): Do not allow `static_mut_refs` lint
@ -11,6 +11,9 @@ fn main() {
test_read_write();
test_race();
test_syscall();
test_blocking_read();
test_blocking_write();
test_two_threads_blocked_on_eventfd();
}
fn read_bytes<const N: usize>(fd: i32, buf: &mut [u8; N]) -> i32 {
@ -118,3 +121,117 @@ fn test_syscall() {
let fd = unsafe { libc::syscall(libc::SYS_eventfd2, initval, flags) };
assert_ne!(fd, -1);
}
// This test will block on an eventfd `read`, then get unblocked by a `write`.
fn test_blocking_read() {
    // eventfd read will block when the EFD_NONBLOCK flag is clear and the counter is 0.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
let thread1 = thread::spawn(move || {
let mut buf: [u8; 8] = [0; 8];
// This will block.
let res = read_bytes(fd, &mut buf);
        // read returns the number of bytes read, which for an eventfd is always 8.
assert_eq!(res, 8);
let counter = u64::from_ne_bytes(buf);
assert_eq!(counter, 1);
});
let sized_8_data: [u8; 8] = 1_u64.to_ne_bytes();
// Pass control to thread1 so it can block on eventfd `read`.
thread::yield_now();
// Write 1 to the counter to unblock thread1.
let res = write_bytes(fd, sized_8_data);
assert_eq!(res, 8);
thread1.join().unwrap();
}
/// This test will block on an eventfd `write`, then get unblocked by a `read`.
fn test_blocking_write() {
    // eventfd write will block when the EFD_NONBLOCK flag is clear
    // and the addition would cause the counter to exceed u64::MAX - 1.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
    // Write u64::MAX - 1, so that all subsequent writes will block.
let sized_8_data: [u8; 8] = (u64::MAX - 1).to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
assert_eq!(res, 8);
let thread1 = thread::spawn(move || {
let sized_8_data = 1_u64.to_ne_bytes();
        // Write 1 to the counter; this will block.
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
// Make sure that write is successful.
assert_eq!(res, 8);
});
let mut buf: [u8; 8] = [0; 8];
// Pass control to thread1 so it can block on eventfd `write`.
thread::yield_now();
    // This `read` unblocks the `write` that is blocked in thread1.
    let res = read_bytes(fd, &mut buf);
    // read returns the number of bytes read, which for an eventfd is always 8.
assert_eq!(res, 8);
let counter = u64::from_ne_bytes(buf);
assert_eq!(counter, (u64::MAX - 1));
thread1.join().unwrap();
}
// Test two threads blocked on eventfd.
// Expected behaviour:
// 1. thread1 and thread2 both block on `write`.
// 2. thread3 unblocks both thread1 and thread2.
// 3. The writes in thread1 and thread2 return successfully.
fn test_two_threads_blocked_on_eventfd() {
    // eventfd write will block when the EFD_NONBLOCK flag is clear
    // and the addition would cause the counter to exceed u64::MAX - 1.
let flags = libc::EFD_CLOEXEC;
let fd = unsafe { libc::eventfd(0, flags) };
    // Write u64::MAX - 1, so that all subsequent writes will block.
let sized_8_data: [u8; 8] = (u64::MAX - 1).to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
assert_eq!(res, 8);
let thread1 = thread::spawn(move || {
thread::park();
let sized_8_data = 1_u64.to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
// Make sure that write is successful.
assert_eq!(res, 8);
});
let thread2 = thread::spawn(move || {
thread::park();
let sized_8_data = 1_u64.to_ne_bytes();
let res: i64 = unsafe {
libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8).try_into().unwrap()
};
// Make sure that write is successful.
assert_eq!(res, 8);
});
let thread3 = thread::spawn(move || {
thread::park();
let mut buf: [u8; 8] = [0; 8];
        // This `read` unblocks both `write`s blocked in thread1 and thread2.
        let res = read_bytes(fd, &mut buf);
        // read returns the number of bytes read, which for an eventfd is always 8.
assert_eq!(res, 8);
let counter = u64::from_ne_bytes(buf);
assert_eq!(counter, (u64::MAX - 1));
});
thread1.thread().unpark();
thread2.thread().unpark();
thread3.thread().unpark();
thread1.join().unwrap();
thread2.join().unwrap();
thread3.join().unwrap();
}
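// Not part of this patch, a minimal sketch of the read semantics the tests
// above rely on: without EFD_SEMAPHORE, a single read returns the whole
// accumulated counter and resets it to 0, which is what makes room for
// blocked writers.
#[allow(dead_code)]
fn read_resets_counter() {
    let fd = unsafe { libc::eventfd(0, libc::EFD_CLOEXEC) };
    assert_ne!(fd, -1);
    let one = 1_u64.to_ne_bytes();
    for _ in 0..2 {
        let res = unsafe { libc::write(fd, one.as_ptr().cast(), 8) };
        assert_eq!(res, 8);
    }
    let mut buf = [0u8; 8];
    // A single read drains both increments at once and returns their sum.
    let res = unsafe { libc::read(fd, buf.as_mut_ptr().cast(), 8) };
    assert_eq!(res, 8);
    assert_eq!(u64::from_ne_bytes(buf), 2);
}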

View File

@ -959,10 +959,20 @@ pub fn libm() {
unsafe { ldexp(a, b) }
}
assert_approx_eq!(64f32.sqrt(), 8f32);
assert_approx_eq!(64f64.sqrt(), 8f64);
assert_eq!(64_f32.sqrt(), 8_f32);
assert_eq!(64_f64.sqrt(), 8_f64);
assert_eq!(f32::INFINITY.sqrt(), f32::INFINITY);
assert_eq!(f64::INFINITY.sqrt(), f64::INFINITY);
assert_eq!(0.0_f32.sqrt().total_cmp(&0.0), std::cmp::Ordering::Equal);
assert_eq!(0.0_f64.sqrt().total_cmp(&0.0), std::cmp::Ordering::Equal);
assert_eq!((-0.0_f32).sqrt().total_cmp(&-0.0), std::cmp::Ordering::Equal);
assert_eq!((-0.0_f64).sqrt().total_cmp(&-0.0), std::cmp::Ordering::Equal);
assert!((-5.0_f32).sqrt().is_nan());
assert!((-5.0_f64).sqrt().is_nan());
assert!(f32::NEG_INFINITY.sqrt().is_nan());
assert!(f64::NEG_INFINITY.sqrt().is_nan());
assert!(f32::NAN.sqrt().is_nan());
assert!(f64::NAN.sqrt().is_nan());
assert_approx_eq!(25f32.powi(-2), 0.0016f32);
assert_approx_eq!(23.2f64.powi(2), 538.24f64);
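    // Not part of this patch: the exact comparisons above are sound because
    // IEEE 754 requires sqrt to be correctly rounded, so inputs whose exact
    // square roots are representable must compare bitwise-equal.
    assert_eq!(2.25_f64.sqrt(), 1.5_f64);
    assert_eq!(4.0_f32.sqrt().to_bits(), 2.0_f32.to_bits());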

View File

@ -1,3 +1,6 @@
// FIXME: this miscompiles with optimizations, see <https://github.com/rust-lang/rust/issues/132898>.
//@compile-flags: -Zmir-opt-level=0
trait S: Sized {
fn tpb(&mut self, _s: Self) {}
}
@ -75,6 +78,25 @@ fn with_interior_mutability() {
});
}
// This one really shouldn't be accepted, but since we treat 2phase as raw, we do accept it.
// Tree Borrows rejects it.
fn aliasing_violation() {
struct Foo(u64);
impl Foo {
fn add(&mut self, n: u64) -> u64 {
self.0 + n
}
}
let mut f = Foo(0);
let alias = &mut f.0 as *mut u64;
let res = f.add(unsafe {
*alias = 42;
0
});
assert_eq!(res, 42);
}
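// Not part of this patch: roughly how the two-phase borrow above behaves. The
// receiver reborrow is created before the argument is evaluated but only
// activated at the call itself, which is why treating it as raw lets the
// raw-pointer write through `alias` slip in between:
//
//     let recv = &mut f;                   // two-phase borrow, not yet active
//     let arg = unsafe { *alias = 42; 0 }; // writes while `recv` exists
//     Foo::add(recv, arg);                 // `recv` is activated only here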
fn main() {
two_phase1();
two_phase2();
@ -84,4 +106,5 @@ fn main() {
with_interior_mutability();
two_phase_overlapping1();
two_phase_overlapping2();
aliasing_violation();
}

View File

@ -8,6 +8,7 @@ fn main() {
mut_raw_then_mut_shr();
mut_shr_then_mut_raw();
mut_raw_mut();
mut_raw_mut2();
partially_invalidate_mut();
drop_after_sharing();
// direct_mut_to_const_raw();
@ -96,6 +97,18 @@ fn mut_raw_mut() {
assert_eq!(x, 4);
}
// A variant of `mut_raw_mut` that does *not* get accepted by Tree Borrows.
// It's kind of an accident that we accept it in Stacked Borrows...
fn mut_raw_mut2() {
unsafe {
let mut root = 0;
let to = &mut root as *mut i32;
*to = 0;
let _val = root;
*to = 0;
}
}
fn partially_invalidate_mut() {
let data = &mut (0u8, 0u8);
let reborrow = &mut *data as *mut (u8, u8);

View File

@ -4,11 +4,7 @@ error[E0477]: the type `&'a i32` does not fulfill the required lifetime
LL | impl<'a> Tr for &'a i32 {
| ^^^^^^^^^^^^^^^^^^^^^^^
|
note: type must satisfy the static lifetime as required by this binding
--> $DIR/specialize_with_generalize_lifetimes.rs:12:15
|
LL | impl<T: Any + 'static> Tr for T {
| ^^^^^^^
= note: type must satisfy the static lifetime
error[E0477]: the type `Wrapper<'a>` does not fulfill the required lifetime
--> $DIR/specialize_with_generalize_lifetimes.rs:31:1
@ -16,11 +12,7 @@ error[E0477]: the type `Wrapper<'a>` does not fulfill the required lifetime
LL | impl<'a> Tr for Wrapper<'a> {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
note: type must satisfy the static lifetime as required by this binding
--> $DIR/specialize_with_generalize_lifetimes.rs:12:15
|
LL | impl<T: Any + 'static> Tr for T {
| ^^^^^^^
= note: type must satisfy the static lifetime
error: aborting due to 2 previous errors

View File

@ -0,0 +1,16 @@
//@ check-pass
#![feature(const_trait_impl)]
#[const_trait]
trait Foo {}
impl<T> const Foo for (T,) where T: ~const Foo {}
const fn needs_const_foo(_: impl ~const Foo + Copy) {}
const fn test<T: ~const Foo + Copy>(t: T) {
needs_const_foo((t,));
}
fn main() {}