mirror of
https://github.com/rust-lang/rust.git
synced 2024-11-01 06:51:58 +00:00
Rollup merge of #104989 - RalfJung:miri, r=RalfJung
update Miri r? `@ghost`
This commit is contained in:
commit
2ccb38b92d
@ -296,6 +296,13 @@ needed.
|
||||
|
||||
### Exporting changes to the rustc repo
|
||||
|
||||
Keep in mind that pushing is the most complicated job that josh has to do --
|
||||
pulling just filters the rustc history, but pushing needs to construct a new
|
||||
rustc history that would filter to the given Miri history! To avoid problems, it
|
||||
is a good idea to always pull immediately before you push. In particular, you
|
||||
should never do two josh pushes without an intermediate pull; that can lead to
|
||||
duplicated commits.
|
||||
|
||||
Josh needs to be running, as described above. We will use the josh proxy to push
|
||||
to your fork of rustc. Run the following in the Miri repo, assuming we are on an
|
||||
up-to-date master branch:
|
||||
|
@ -281,9 +281,10 @@ pub fn phase_rustc(mut args: impl Iterator<Item = String>, phase: RustcPhase) {
|
||||
eprintln!("[cargo-miri rustc] writing run info to `{}`", filename.display());
|
||||
}
|
||||
info.store(&filename);
|
||||
// For Windows, do the same thing again with `.exe` appended to the filename.
|
||||
// For Windows and WASM, do the same thing again with `.exe`/`.wasm` appended to the filename.
|
||||
// (Need to do this here as cargo moves that "binary" to a different place before running it.)
|
||||
info.store(&out_filename("", ".exe"));
|
||||
info.store(&out_filename("", ".wasm"));
|
||||
};
|
||||
|
||||
let runnable_crate = !info_query && is_runnable_crate();
|
||||
|
@ -1,6 +1,17 @@
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
set -x
|
||||
|
||||
function begingroup {
|
||||
echo "::group::$@"
|
||||
set -x
|
||||
}
|
||||
|
||||
function endgroup {
|
||||
set +x
|
||||
echo "::endgroup"
|
||||
}
|
||||
|
||||
begingroup "Building Miri"
|
||||
|
||||
# Determine configuration for installed build
|
||||
echo "Installing release version of Miri"
|
||||
@ -14,14 +25,15 @@ export CARGO_EXTRA_FLAGS="--locked"
|
||||
./miri check --no-default-features # make sure this can be built
|
||||
./miri check --all-features # and this, too
|
||||
./miri build --all-targets # the build that all the `./miri test` below will use
|
||||
echo
|
||||
|
||||
endgroup
|
||||
|
||||
# Test
|
||||
function run_tests {
|
||||
if [ -n "${MIRI_TEST_TARGET+exists}" ]; then
|
||||
echo "Testing foreign architecture $MIRI_TEST_TARGET"
|
||||
begingroup "Testing foreign architecture $MIRI_TEST_TARGET"
|
||||
else
|
||||
echo "Testing host architecture"
|
||||
begingroup "Testing host architecture"
|
||||
fi
|
||||
|
||||
## ui test suite
|
||||
@ -52,7 +64,6 @@ function run_tests {
|
||||
echo 'build.rustc-wrapper = "thisdoesnotexist"' > .cargo/config.toml
|
||||
# Run the actual test
|
||||
${PYTHON} test-cargo-miri/run-test.py
|
||||
echo
|
||||
# Clean up
|
||||
unset RUSTC MIRI
|
||||
rm -rf .cargo
|
||||
@ -63,16 +74,23 @@ function run_tests {
|
||||
cargo miri run --manifest-path bench-cargo-miri/$BENCH/Cargo.toml
|
||||
done
|
||||
fi
|
||||
|
||||
endgroup
|
||||
}
|
||||
|
||||
function run_tests_minimal {
|
||||
if [ -n "${MIRI_TEST_TARGET+exists}" ]; then
|
||||
echo "Testing MINIMAL foreign architecture $MIRI_TEST_TARGET: only testing $@"
|
||||
begingroup "Testing MINIMAL foreign architecture $MIRI_TEST_TARGET: only testing $@"
|
||||
else
|
||||
echo "Testing MINIMAL host architecture: only testing $@"
|
||||
begingroup "Testing MINIMAL host architecture: only testing $@"
|
||||
fi
|
||||
|
||||
./miri test -- "$@"
|
||||
|
||||
# Ensure that a small smoke test of cargo-miri works.
|
||||
cargo miri run --manifest-path test-cargo-miri/no-std-smoke/Cargo.toml --target ${MIRI_TEST_TARGET-$HOST_TARGET}
|
||||
|
||||
endgroup
|
||||
}
|
||||
|
||||
# host
|
||||
@ -85,6 +103,7 @@ case $HOST_TARGET in
|
||||
MIRI_TEST_TARGET=i686-pc-windows-msvc run_tests
|
||||
MIRI_TEST_TARGET=x86_64-unknown-freebsd run_tests_minimal hello integer vec panic/panic concurrency/simple atomic data_race env/var
|
||||
MIRI_TEST_TARGET=aarch64-linux-android run_tests_minimal hello integer vec panic/panic
|
||||
MIRI_TEST_TARGET=wasm32-wasi MIRI_NO_STD=1 run_tests_minimal no_std # supports std but miri doesn't support it
|
||||
MIRI_TEST_TARGET=thumbv7em-none-eabihf MIRI_NO_STD=1 run_tests_minimal no_std # no_std embedded architecture
|
||||
;;
|
||||
x86_64-apple-darwin)
|
||||
|
@ -1 +1 @@
|
||||
7477c1f4f7d6bef037d523099b240d22aa1b63a0
|
||||
454784afba5bf35b5ff14ada0e31265ad1d75e73
|
||||
|
@ -838,18 +838,18 @@ impl VClockAlloc {
|
||||
&self,
|
||||
alloc_id: AllocId,
|
||||
range: AllocRange,
|
||||
global: &GlobalState,
|
||||
thread_mgr: &ThreadManager<'_, '_>,
|
||||
machine: &MiriMachine<'_, '_>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let global = machine.data_race.as_ref().unwrap();
|
||||
if global.race_detecting() {
|
||||
let (index, clocks) = global.current_thread_state(thread_mgr);
|
||||
let (index, clocks) = global.current_thread_state(&machine.threads);
|
||||
let mut alloc_ranges = self.alloc_ranges.borrow_mut();
|
||||
for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
|
||||
if let Err(DataRace) = range.read_race_detect(&clocks, index) {
|
||||
// Report data-race.
|
||||
return Self::report_data_race(
|
||||
global,
|
||||
thread_mgr,
|
||||
&machine.threads,
|
||||
range,
|
||||
"Read",
|
||||
false,
|
||||
@ -869,17 +869,17 @@ impl VClockAlloc {
|
||||
alloc_id: AllocId,
|
||||
range: AllocRange,
|
||||
write_type: WriteType,
|
||||
global: &mut GlobalState,
|
||||
thread_mgr: &ThreadManager<'_, '_>,
|
||||
machine: &mut MiriMachine<'_, '_>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let global = machine.data_race.as_mut().unwrap();
|
||||
if global.race_detecting() {
|
||||
let (index, clocks) = global.current_thread_state(thread_mgr);
|
||||
let (index, clocks) = global.current_thread_state(&machine.threads);
|
||||
for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
|
||||
if let Err(DataRace) = range.write_race_detect(&clocks, index, write_type) {
|
||||
// Report data-race
|
||||
return Self::report_data_race(
|
||||
global,
|
||||
thread_mgr,
|
||||
&machine.threads,
|
||||
range,
|
||||
write_type.get_descriptor(),
|
||||
false,
|
||||
@ -901,10 +901,9 @@ impl VClockAlloc {
|
||||
&mut self,
|
||||
alloc_id: AllocId,
|
||||
range: AllocRange,
|
||||
global: &mut GlobalState,
|
||||
thread_mgr: &ThreadManager<'_, '_>,
|
||||
machine: &mut MiriMachine<'_, '_>,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.unique_access(alloc_id, range, WriteType::Write, global, thread_mgr)
|
||||
self.unique_access(alloc_id, range, WriteType::Write, machine)
|
||||
}
|
||||
|
||||
/// Detect data-races for an unsynchronized deallocate operation, will not perform
|
||||
@ -915,10 +914,9 @@ impl VClockAlloc {
|
||||
&mut self,
|
||||
alloc_id: AllocId,
|
||||
range: AllocRange,
|
||||
global: &mut GlobalState,
|
||||
thread_mgr: &ThreadManager<'_, '_>,
|
||||
machine: &mut MiriMachine<'_, '_>,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.unique_access(alloc_id, range, WriteType::Deallocate, global, thread_mgr)
|
||||
self.unique_access(alloc_id, range, WriteType::Deallocate, machine)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -118,6 +118,13 @@ pub struct Thread<'mir, 'tcx> {
|
||||
/// The virtual call stack.
|
||||
stack: Vec<Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>>,
|
||||
|
||||
/// The index of the topmost user-relevant frame in `stack`. This field must contain
|
||||
/// the value produced by `get_top_user_relevant_frame`.
|
||||
/// The `None` state here represents
|
||||
/// This field is a cache to reduce how often we call that method. The cache is manually
|
||||
/// maintained inside `MiriMachine::after_stack_push` and `MiriMachine::after_stack_pop`.
|
||||
top_user_relevant_frame: Option<usize>,
|
||||
|
||||
/// The join status.
|
||||
join_status: ThreadJoinStatus,
|
||||
|
||||
@ -147,6 +154,40 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> {
|
||||
fn thread_name(&self) -> &[u8] {
|
||||
if let Some(ref thread_name) = self.thread_name { thread_name } else { b"<unnamed>" }
|
||||
}
|
||||
|
||||
/// Return the top user-relevant frame, if there is one.
|
||||
/// Note that the choice to return `None` here when there is no user-relevant frame is part of
|
||||
/// justifying the optimization that only pushes of user-relevant frames require updating the
|
||||
/// `top_user_relevant_frame` field.
|
||||
fn compute_top_user_relevant_frame(&self) -> Option<usize> {
|
||||
self.stack
|
||||
.iter()
|
||||
.enumerate()
|
||||
.rev()
|
||||
.find_map(|(idx, frame)| if frame.extra.is_user_relevant { Some(idx) } else { None })
|
||||
}
|
||||
|
||||
/// Re-compute the top user-relevant frame from scratch.
|
||||
pub fn recompute_top_user_relevant_frame(&mut self) {
|
||||
self.top_user_relevant_frame = self.compute_top_user_relevant_frame();
|
||||
}
|
||||
|
||||
/// Set the top user-relevant frame to the given value. Must be equal to what
|
||||
/// `get_top_user_relevant_frame` would return!
|
||||
pub fn set_top_user_relevant_frame(&mut self, frame_idx: usize) {
|
||||
debug_assert_eq!(Some(frame_idx), self.compute_top_user_relevant_frame());
|
||||
self.top_user_relevant_frame = Some(frame_idx);
|
||||
}
|
||||
|
||||
/// Returns the topmost frame that is considered user-relevant, or the
|
||||
/// top of the stack if there is no such frame, or `None` if the stack is empty.
|
||||
pub fn top_user_relevant_frame(&self) -> Option<usize> {
|
||||
debug_assert_eq!(self.top_user_relevant_frame, self.compute_top_user_relevant_frame());
|
||||
// This can be called upon creation of an allocation. We create allocations while setting up
|
||||
// parts of the Rust runtime when we do not have any stack frames yet, so we need to handle
|
||||
// empty stacks.
|
||||
self.top_user_relevant_frame.or_else(|| self.stack.len().checked_sub(1))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx> std::fmt::Debug for Thread<'mir, 'tcx> {
|
||||
@ -167,6 +208,7 @@ impl<'mir, 'tcx> Default for Thread<'mir, 'tcx> {
|
||||
state: ThreadState::Enabled,
|
||||
thread_name: None,
|
||||
stack: Vec::new(),
|
||||
top_user_relevant_frame: None,
|
||||
join_status: ThreadJoinStatus::Joinable,
|
||||
panic_payload: None,
|
||||
last_error: None,
|
||||
@ -184,8 +226,15 @@ impl<'mir, 'tcx> Thread<'mir, 'tcx> {
|
||||
|
||||
impl VisitTags for Thread<'_, '_> {
|
||||
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
|
||||
let Thread { panic_payload, last_error, stack, state: _, thread_name: _, join_status: _ } =
|
||||
self;
|
||||
let Thread {
|
||||
panic_payload,
|
||||
last_error,
|
||||
stack,
|
||||
top_user_relevant_frame: _,
|
||||
state: _,
|
||||
thread_name: _,
|
||||
join_status: _,
|
||||
} = self;
|
||||
|
||||
panic_payload.visit_tags(visit);
|
||||
last_error.visit_tags(visit);
|
||||
@ -414,7 +463,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
|
||||
}
|
||||
|
||||
/// Get a shared borrow of the currently active thread.
|
||||
fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
|
||||
pub fn active_thread_ref(&self) -> &Thread<'mir, 'tcx> {
|
||||
&self.threads[self.active_thread]
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,7 @@ use std::thread;
|
||||
use log::info;
|
||||
|
||||
use rustc_data_structures::fx::FxHashSet;
|
||||
use rustc_hir::def::Namespace;
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_middle::ty::{
|
||||
self,
|
||||
@ -195,7 +196,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
|
||||
MiriMachine::late_init(&mut ecx, config)?;
|
||||
|
||||
// Make sure we have MIR. We check MIR for some stable monomorphic function in libcore.
|
||||
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"]);
|
||||
let sentinel = ecx.try_resolve_path(&["core", "ascii", "escape_default"], Namespace::ValueNS);
|
||||
if !matches!(sentinel, Some(s) if tcx.is_mir_available(s.def.def_id())) {
|
||||
tcx.sess.fatal(
|
||||
"the current sysroot was built without `-Zalways-encode-mir`, or libcore seems missing. \
|
||||
|
@ -2,12 +2,12 @@ pub mod convert;
|
||||
|
||||
use std::cmp;
|
||||
use std::iter;
|
||||
use std::mem;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::time::Duration;
|
||||
|
||||
use log::trace;
|
||||
|
||||
use rustc_hir::def::{DefKind, Namespace};
|
||||
use rustc_hir::def_id::{DefId, CRATE_DEF_INDEX};
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::ty::{
|
||||
@ -74,40 +74,67 @@ const UNIX_IO_ERROR_TABLE: &[(&str, std::io::ErrorKind)] = {
|
||||
};
|
||||
|
||||
/// Gets an instance for a path.
|
||||
fn try_resolve_did<'tcx>(tcx: TyCtxt<'tcx>, path: &[&str]) -> Option<DefId> {
|
||||
tcx.crates(()).iter().find(|&&krate| tcx.crate_name(krate).as_str() == path[0]).and_then(
|
||||
|krate| {
|
||||
let krate = DefId { krate: *krate, index: CRATE_DEF_INDEX };
|
||||
let mut items = tcx.module_children(krate);
|
||||
let mut path_it = path.iter().skip(1).peekable();
|
||||
///
|
||||
/// A `None` namespace indicates we are looking for a module.
|
||||
fn try_resolve_did(tcx: TyCtxt<'_>, path: &[&str], namespace: Option<Namespace>) -> Option<DefId> {
|
||||
/// Yield all children of the given item, that have the given name.
|
||||
fn find_children<'tcx: 'a, 'a>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
item: DefId,
|
||||
name: &'a str,
|
||||
) -> impl Iterator<Item = DefId> + 'a {
|
||||
tcx.module_children(item)
|
||||
.iter()
|
||||
.filter(move |item| item.ident.name.as_str() == name)
|
||||
.map(move |item| item.res.def_id())
|
||||
}
|
||||
|
||||
while let Some(segment) = path_it.next() {
|
||||
for item in mem::take(&mut items).iter() {
|
||||
if item.ident.name.as_str() == *segment {
|
||||
if path_it.peek().is_none() {
|
||||
return Some(item.res.def_id());
|
||||
}
|
||||
// Take apart the path: leading crate, a sequence of modules, and potentially a final item.
|
||||
let (&crate_name, path) = path.split_first().expect("paths must have at least one segment");
|
||||
let (modules, item) = if let Some(namespace) = namespace {
|
||||
let (&item_name, modules) =
|
||||
path.split_last().expect("non-module paths must have at least 2 segments");
|
||||
(modules, Some((item_name, namespace)))
|
||||
} else {
|
||||
(path, None)
|
||||
};
|
||||
|
||||
items = tcx.module_children(item.res.def_id());
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
},
|
||||
)
|
||||
// First find the crate.
|
||||
let krate =
|
||||
tcx.crates(()).iter().find(|&&krate| tcx.crate_name(krate).as_str() == crate_name)?;
|
||||
let mut cur_item = DefId { krate: *krate, index: CRATE_DEF_INDEX };
|
||||
// Then go over the modules.
|
||||
for &segment in modules {
|
||||
cur_item = find_children(tcx, cur_item, segment)
|
||||
.find(|item| tcx.def_kind(item) == DefKind::Mod)?;
|
||||
}
|
||||
// Finally, look up the desired item in this module, if any.
|
||||
match item {
|
||||
Some((item_name, namespace)) =>
|
||||
Some(
|
||||
find_children(tcx, cur_item, item_name)
|
||||
.find(|item| tcx.def_kind(item).ns() == Some(namespace))?,
|
||||
),
|
||||
None => Some(cur_item),
|
||||
}
|
||||
}
|
||||
|
||||
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
/// Checks if the given crate/module exists.
|
||||
fn have_module(&self, path: &[&str]) -> bool {
|
||||
try_resolve_did(*self.eval_context_ref().tcx, path, None).is_some()
|
||||
}
|
||||
|
||||
/// Gets an instance for a path; fails gracefully if the path does not exist.
|
||||
fn try_resolve_path(&self, path: &[&str]) -> Option<ty::Instance<'tcx>> {
|
||||
let did = try_resolve_did(self.eval_context_ref().tcx.tcx, path)?;
|
||||
Some(ty::Instance::mono(self.eval_context_ref().tcx.tcx, did))
|
||||
fn try_resolve_path(&self, path: &[&str], namespace: Namespace) -> Option<ty::Instance<'tcx>> {
|
||||
let tcx = self.eval_context_ref().tcx.tcx;
|
||||
let did = try_resolve_did(tcx, path, Some(namespace))?;
|
||||
Some(ty::Instance::mono(tcx, did))
|
||||
}
|
||||
|
||||
/// Gets an instance for a path.
|
||||
fn resolve_path(&self, path: &[&str]) -> ty::Instance<'tcx> {
|
||||
self.try_resolve_path(path)
|
||||
fn resolve_path(&self, path: &[&str], namespace: Namespace) -> ty::Instance<'tcx> {
|
||||
self.try_resolve_path(path, namespace)
|
||||
.unwrap_or_else(|| panic!("failed to find required Rust item: {path:?}"))
|
||||
}
|
||||
|
||||
@ -115,7 +142,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
/// if the path could be resolved, and None otherwise
|
||||
fn eval_path_scalar(&self, path: &[&str]) -> InterpResult<'tcx, Scalar<Provenance>> {
|
||||
let this = self.eval_context_ref();
|
||||
let instance = this.resolve_path(path);
|
||||
let instance = this.resolve_path(path, Namespace::ValueNS);
|
||||
let cid = GlobalId { instance, promoted: None };
|
||||
// We don't give a span -- this isn't actually used directly by the program anyway.
|
||||
let const_val = this.eval_global(cid, None)?;
|
||||
@ -147,7 +174,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
/// Helper function to get the `TyAndLayout` of a `libc` type
|
||||
fn libc_ty_layout(&self, name: &str) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
|
||||
let this = self.eval_context_ref();
|
||||
let ty = this.resolve_path(&["libc", name]).ty(*this.tcx, ty::ParamEnv::reveal_all());
|
||||
let ty = this
|
||||
.resolve_path(&["libc", name], Namespace::TypeNS)
|
||||
.ty(*this.tcx, ty::ParamEnv::reveal_all());
|
||||
this.layout_of(ty)
|
||||
}
|
||||
|
||||
@ -155,7 +184,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
fn windows_ty_layout(&self, name: &str) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
|
||||
let this = self.eval_context_ref();
|
||||
let ty = this
|
||||
.resolve_path(&["std", "sys", "windows", "c", name])
|
||||
.resolve_path(&["std", "sys", "windows", "c", name], Namespace::TypeNS)
|
||||
.ty(*this.tcx, ty::ParamEnv::reveal_all());
|
||||
this.layout_of(ty)
|
||||
}
|
||||
@ -936,31 +965,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
|
||||
pub fn current_span(&self) -> CurrentSpan<'_, 'mir, 'tcx> {
|
||||
CurrentSpan { current_frame_idx: None, machine: self }
|
||||
}
|
||||
}
|
||||
|
||||
/// A `CurrentSpan` should be created infrequently (ideally once) per interpreter step. It does
|
||||
/// nothing on creation, but when `CurrentSpan::get` is called, searches the current stack for the
|
||||
/// topmost frame which corresponds to a local crate, and returns the current span in that frame.
|
||||
/// The result of that search is cached so that later calls are approximately free.
|
||||
#[derive(Clone)]
|
||||
pub struct CurrentSpan<'a, 'mir, 'tcx> {
|
||||
current_frame_idx: Option<usize>,
|
||||
machine: &'a MiriMachine<'mir, 'tcx>,
|
||||
}
|
||||
|
||||
impl<'a, 'mir: 'a, 'tcx: 'a + 'mir> CurrentSpan<'a, 'mir, 'tcx> {
|
||||
pub fn machine(&self) -> &'a MiriMachine<'mir, 'tcx> {
|
||||
self.machine
|
||||
}
|
||||
|
||||
/// Get the current span, skipping non-local frames.
|
||||
/// Get the current span in the topmost function which is workspace-local and not
|
||||
/// `#[track_caller]`.
|
||||
/// This function is backed by a cache, and can be assumed to be very fast.
|
||||
pub fn get(&mut self) -> Span {
|
||||
let idx = self.current_frame_idx();
|
||||
self.stack().get(idx).map(Frame::current_span).unwrap_or(rustc_span::DUMMY_SP)
|
||||
/// It will work even when the stack is empty.
|
||||
pub fn current_span(&self) -> Span {
|
||||
self.top_user_relevant_frame()
|
||||
.map(|frame_idx| self.stack()[frame_idx].current_span())
|
||||
.unwrap_or(rustc_span::DUMMY_SP)
|
||||
}
|
||||
|
||||
/// Returns the span of the *caller* of the current operation, again
|
||||
@ -968,46 +980,27 @@ impl<'a, 'mir: 'a, 'tcx: 'a + 'mir> CurrentSpan<'a, 'mir, 'tcx> {
|
||||
/// current operation is not in a local crate.
|
||||
/// This is useful when we are processing something which occurs on function-entry and we want
|
||||
/// to point at the call to the function, not the function definition generally.
|
||||
pub fn get_caller(&mut self) -> Span {
|
||||
pub fn caller_span(&self) -> Span {
|
||||
// We need to go down at least to the caller (len - 2), or however
|
||||
// far we have to go to find a frame in a local crate.
|
||||
let local_frame_idx = self.current_frame_idx();
|
||||
let stack = self.stack();
|
||||
let idx = cmp::min(local_frame_idx, stack.len().saturating_sub(2));
|
||||
stack.get(idx).map(Frame::current_span).unwrap_or(rustc_span::DUMMY_SP)
|
||||
// far we have to go to find a frame in a local crate which is also not #[track_caller].
|
||||
let frame_idx = self.top_user_relevant_frame().unwrap();
|
||||
let frame_idx = cmp::min(frame_idx, self.stack().len().checked_sub(2).unwrap());
|
||||
self.stack()[frame_idx].current_span()
|
||||
}
|
||||
|
||||
fn stack(&self) -> &[Frame<'mir, 'tcx, Provenance, machine::FrameData<'tcx>>] {
|
||||
self.machine.threads.active_thread_stack()
|
||||
self.threads.active_thread_stack()
|
||||
}
|
||||
|
||||
fn current_frame_idx(&mut self) -> usize {
|
||||
*self
|
||||
.current_frame_idx
|
||||
.get_or_insert_with(|| Self::compute_current_frame_index(self.machine))
|
||||
fn top_user_relevant_frame(&self) -> Option<usize> {
|
||||
self.threads.active_thread_ref().top_user_relevant_frame()
|
||||
}
|
||||
|
||||
// Find the position of the inner-most frame which is part of the crate being
|
||||
// compiled/executed, part of the Cargo workspace, and is also not #[track_caller].
|
||||
#[inline(never)]
|
||||
fn compute_current_frame_index(machine: &MiriMachine<'_, '_>) -> usize {
|
||||
machine
|
||||
.threads
|
||||
.active_thread_stack()
|
||||
.iter()
|
||||
.enumerate()
|
||||
.rev()
|
||||
.find_map(|(idx, frame)| {
|
||||
let def_id = frame.instance.def_id();
|
||||
if (def_id.is_local() || machine.local_crates.contains(&def_id.krate))
|
||||
&& !frame.instance.def.requires_caller_location(machine.tcx)
|
||||
{
|
||||
Some(idx)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.unwrap_or(0)
|
||||
/// This is the source of truth for the `is_user_relevant` flag in our `FrameExtra`.
|
||||
pub fn is_user_relevant(&self, frame: &Frame<'mir, 'tcx, Provenance>) -> bool {
|
||||
let def_id = frame.instance.def_id();
|
||||
(def_id.is_local() || self.local_crates.contains(&def_id.krate))
|
||||
&& !frame.instance.def.requires_caller_location(self.tcx)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,7 @@ pub use crate::diagnostics::{
|
||||
pub use crate::eval::{
|
||||
create_ecx, eval_entry, AlignmentCheck, BacktraceStyle, IsolatedOp, MiriConfig, RejectOpWith,
|
||||
};
|
||||
pub use crate::helpers::{CurrentSpan, EvalContextExt as _};
|
||||
pub use crate::helpers::EvalContextExt as _;
|
||||
pub use crate::intptrcast::ProvenanceMode;
|
||||
pub use crate::machine::{
|
||||
AllocExtra, FrameData, MiriInterpCx, MiriInterpCxExt, MiriMachine, MiriMemoryKind,
|
||||
|
@ -50,12 +50,18 @@ pub struct FrameData<'tcx> {
|
||||
/// for the start of this frame. When we finish executing this frame,
|
||||
/// we use this to register a completed event with `measureme`.
|
||||
pub timing: Option<measureme::DetachedTiming>,
|
||||
|
||||
/// Indicates whether a `Frame` is part of a workspace-local crate and is also not
|
||||
/// `#[track_caller]`. We compute this once on creation and store the result, as an
|
||||
/// optimization.
|
||||
/// This is used by `MiriMachine::current_span` and `MiriMachine::caller_span`
|
||||
pub is_user_relevant: bool,
|
||||
}
|
||||
|
||||
impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
// Omitting `timing`, it does not support `Debug`.
|
||||
let FrameData { stacked_borrows, catch_unwind, timing: _ } = self;
|
||||
let FrameData { stacked_borrows, catch_unwind, timing: _, is_user_relevant: _ } = self;
|
||||
f.debug_struct("FrameData")
|
||||
.field("stacked_borrows", stacked_borrows)
|
||||
.field("catch_unwind", catch_unwind)
|
||||
@ -65,7 +71,7 @@ impl<'tcx> std::fmt::Debug for FrameData<'tcx> {
|
||||
|
||||
impl VisitTags for FrameData<'_> {
|
||||
fn visit_tags(&self, visit: &mut dyn FnMut(SbTag)) {
|
||||
let FrameData { catch_unwind, stacked_borrows, timing: _ } = self;
|
||||
let FrameData { catch_unwind, stacked_borrows, timing: _, is_user_relevant: _ } = self;
|
||||
|
||||
catch_unwind.visit_tags(visit);
|
||||
stacked_borrows.visit_tags(visit);
|
||||
@ -895,13 +901,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
|
||||
let alloc = alloc.into_owned();
|
||||
let stacks = ecx.machine.stacked_borrows.as_ref().map(|stacked_borrows| {
|
||||
Stacks::new_allocation(
|
||||
id,
|
||||
alloc.size(),
|
||||
stacked_borrows,
|
||||
kind,
|
||||
ecx.machine.current_span(),
|
||||
)
|
||||
Stacks::new_allocation(id, alloc.size(), stacked_borrows, kind, &ecx.machine)
|
||||
});
|
||||
let race_alloc = ecx.machine.data_race.as_ref().map(|data_race| {
|
||||
data_race::AllocExtra::new_allocation(
|
||||
@ -1003,22 +1003,12 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
if let Some(data_race) = &alloc_extra.data_race {
|
||||
data_race.read(
|
||||
alloc_id,
|
||||
range,
|
||||
machine.data_race.as_ref().unwrap(),
|
||||
&machine.threads,
|
||||
)?;
|
||||
data_race.read(alloc_id, range, machine)?;
|
||||
}
|
||||
if let Some(stacked_borrows) = &alloc_extra.stacked_borrows {
|
||||
stacked_borrows.borrow_mut().before_memory_read(
|
||||
alloc_id,
|
||||
prov_extra,
|
||||
range,
|
||||
machine.stacked_borrows.as_ref().unwrap(),
|
||||
machine.current_span(),
|
||||
&machine.threads,
|
||||
)?;
|
||||
stacked_borrows
|
||||
.borrow_mut()
|
||||
.before_memory_read(alloc_id, prov_extra, range, machine)?;
|
||||
}
|
||||
if let Some(weak_memory) = &alloc_extra.weak_memory {
|
||||
weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
|
||||
@ -1035,22 +1025,10 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
if let Some(data_race) = &mut alloc_extra.data_race {
|
||||
data_race.write(
|
||||
alloc_id,
|
||||
range,
|
||||
machine.data_race.as_mut().unwrap(),
|
||||
&machine.threads,
|
||||
)?;
|
||||
data_race.write(alloc_id, range, machine)?;
|
||||
}
|
||||
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
|
||||
stacked_borrows.get_mut().before_memory_write(
|
||||
alloc_id,
|
||||
prov_extra,
|
||||
range,
|
||||
machine.stacked_borrows.as_ref().unwrap(),
|
||||
machine.current_span(),
|
||||
&machine.threads,
|
||||
)?;
|
||||
stacked_borrows.get_mut().before_memory_write(alloc_id, prov_extra, range, machine)?;
|
||||
}
|
||||
if let Some(weak_memory) = &alloc_extra.weak_memory {
|
||||
weak_memory.memory_accessed(range, machine.data_race.as_ref().unwrap());
|
||||
@ -1070,21 +1048,14 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
machine.emit_diagnostic(NonHaltingDiagnostic::FreedAlloc(alloc_id));
|
||||
}
|
||||
if let Some(data_race) = &mut alloc_extra.data_race {
|
||||
data_race.deallocate(
|
||||
alloc_id,
|
||||
range,
|
||||
machine.data_race.as_mut().unwrap(),
|
||||
&machine.threads,
|
||||
)?;
|
||||
data_race.deallocate(alloc_id, range, machine)?;
|
||||
}
|
||||
if let Some(stacked_borrows) = &mut alloc_extra.stacked_borrows {
|
||||
stacked_borrows.get_mut().before_memory_deallocation(
|
||||
alloc_id,
|
||||
prove_extra,
|
||||
range,
|
||||
machine.stacked_borrows.as_ref().unwrap(),
|
||||
machine.current_span(),
|
||||
&machine.threads,
|
||||
machine,
|
||||
)
|
||||
} else {
|
||||
Ok(())
|
||||
@ -1126,7 +1097,9 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
stacked_borrows: stacked_borrows.map(|sb| sb.borrow_mut().new_frame(&ecx.machine)),
|
||||
catch_unwind: None,
|
||||
timing,
|
||||
is_user_relevant: ecx.machine.is_user_relevant(&frame),
|
||||
};
|
||||
|
||||
Ok(frame.with_extra(extra))
|
||||
}
|
||||
|
||||
@ -1174,6 +1147,13 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
|
||||
#[inline(always)]
|
||||
fn after_stack_push(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
|
||||
if ecx.frame().extra.is_user_relevant {
|
||||
// We just pushed a local frame, so we know that the topmost local frame is the topmost
|
||||
// frame. If we push a non-local frame, there's no need to do anything.
|
||||
let stack_len = ecx.active_thread_stack().len();
|
||||
ecx.active_thread_mut().set_top_user_relevant_frame(stack_len - 1);
|
||||
}
|
||||
|
||||
if ecx.machine.stacked_borrows.is_some() { ecx.retag_return_place() } else { Ok(()) }
|
||||
}
|
||||
|
||||
@ -1183,6 +1163,13 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for MiriMachine<'mir, 'tcx> {
|
||||
mut frame: Frame<'mir, 'tcx, Provenance, FrameData<'tcx>>,
|
||||
unwinding: bool,
|
||||
) -> InterpResult<'tcx, StackPopJump> {
|
||||
if frame.extra.is_user_relevant {
|
||||
// All that we store is whether or not the frame we just removed is local, so now we
|
||||
// have no idea where the next topmost local frame is. So we recompute it.
|
||||
// (If this ever becomes a bottleneck, we could have `push` store the previous
|
||||
// user-relevant frame and restore that here.)
|
||||
ecx.active_thread_mut().recompute_top_user_relevant_frame();
|
||||
}
|
||||
let timing = frame.extra.timing.take();
|
||||
if let Some(stacked_borrows) = &ecx.machine.stacked_borrows {
|
||||
stacked_borrows.borrow_mut().end_call(&frame.extra);
|
||||
|
@ -18,12 +18,12 @@ pub enum PathConversion {
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub fn os_str_to_bytes<'a, 'tcx>(os_str: &'a OsStr) -> InterpResult<'tcx, &'a [u8]> {
|
||||
pub fn os_str_to_bytes<'tcx>(os_str: &OsStr) -> InterpResult<'tcx, &[u8]> {
|
||||
Ok(os_str.as_bytes())
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
pub fn os_str_to_bytes<'a, 'tcx>(os_str: &'a OsStr) -> InterpResult<'tcx, &'a [u8]> {
|
||||
pub fn os_str_to_bytes<'tcx>(os_str: &OsStr) -> InterpResult<'tcx, &[u8]> {
|
||||
// On non-unix platforms the best we can do to transform bytes from/to OS strings is to do the
|
||||
// intermediate transformation into strings. Which invalidates non-utf8 paths that are actually
|
||||
// valid.
|
||||
@ -34,11 +34,11 @@ pub fn os_str_to_bytes<'a, 'tcx>(os_str: &'a OsStr) -> InterpResult<'tcx, &'a [u
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
pub fn bytes_to_os_str<'a, 'tcx>(bytes: &'a [u8]) -> InterpResult<'tcx, &'a OsStr> {
|
||||
pub fn bytes_to_os_str<'tcx>(bytes: &[u8]) -> InterpResult<'tcx, &OsStr> {
|
||||
Ok(OsStr::from_bytes(bytes))
|
||||
}
|
||||
#[cfg(not(unix))]
|
||||
pub fn bytes_to_os_str<'a, 'tcx>(bytes: &'a [u8]) -> InterpResult<'tcx, &'a OsStr> {
|
||||
pub fn bytes_to_os_str<'tcx>(bytes: &[u8]) -> InterpResult<'tcx, &OsStr> {
|
||||
let s = std::str::from_utf8(bytes)
|
||||
.map_err(|_| err_unsup_format!("{:?} is not a valid utf-8 string", bytes))?;
|
||||
Ok(OsStr::new(s))
|
||||
|
@ -261,6 +261,11 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
// (that would be basically https://github.com/rust-lang/miri/issues/450),
|
||||
// we specifically look up the static in libstd that we know is placed
|
||||
// in that section.
|
||||
if !this.have_module(&["std"]) {
|
||||
// Looks like we are running in a `no_std` crate.
|
||||
// That also means no TLS dtors callback to call.
|
||||
return Ok(());
|
||||
}
|
||||
let thread_callback =
|
||||
this.eval_windows("thread_local_key", "p_thread_callback")?.to_pointer(this)?;
|
||||
let thread_callback = this.get_ptr_fn(thread_callback)?.as_instance()?;
|
||||
|
@ -11,7 +11,6 @@ use std::time::SystemTime;
|
||||
use log::trace;
|
||||
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_middle::ty::{self, layout::LayoutOf};
|
||||
use rustc_target::abi::{Align, Size};
|
||||
|
||||
use crate::shims::os_str::bytes_to_os_str;
|
||||
@ -1006,12 +1005,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
|
||||
// as `isize`s instead of having the proper types. Thus, we have to recover the layout of
|
||||
// `statxbuf_op` by using the `libc::statx` struct type.
|
||||
let statxbuf = {
|
||||
// FIXME: This long path is required because `libc::statx` is an struct and also a
|
||||
// function and `resolve_path` is returning the latter.
|
||||
let statx_ty = this
|
||||
.resolve_path(&["libc", "unix", "linux_like", "linux", "gnu", "statx"])
|
||||
.ty(*this.tcx, ty::ParamEnv::reveal_all());
|
||||
let statx_layout = this.layout_of(statx_ty)?;
|
||||
let statx_layout = this.libc_ty_layout("statx")?;
|
||||
MPlaceTy::from_aligned_ptr(statxbuf_ptr, statx_layout)
|
||||
};
|
||||
|
||||
@ -1917,8 +1911,8 @@ struct FileMetadata {
|
||||
}
|
||||
|
||||
impl FileMetadata {
|
||||
fn from_path<'tcx, 'mir>(
|
||||
ecx: &mut MiriInterpCx<'mir, 'tcx>,
|
||||
fn from_path<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'_, 'tcx>,
|
||||
path: &Path,
|
||||
follow_symlink: bool,
|
||||
) -> InterpResult<'tcx, Option<FileMetadata>> {
|
||||
@ -1928,8 +1922,8 @@ impl FileMetadata {
|
||||
FileMetadata::from_meta(ecx, metadata)
|
||||
}
|
||||
|
||||
fn from_fd<'tcx, 'mir>(
|
||||
ecx: &mut MiriInterpCx<'mir, 'tcx>,
|
||||
fn from_fd<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'_, 'tcx>,
|
||||
fd: i32,
|
||||
) -> InterpResult<'tcx, Option<FileMetadata>> {
|
||||
let option = ecx.machine.file_handler.handles.get(&fd);
|
||||
@ -1942,8 +1936,8 @@ impl FileMetadata {
|
||||
FileMetadata::from_meta(ecx, metadata)
|
||||
}
|
||||
|
||||
fn from_meta<'tcx, 'mir>(
|
||||
ecx: &mut MiriInterpCx<'mir, 'tcx>,
|
||||
fn from_meta<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'_, 'tcx>,
|
||||
metadata: Result<std::fs::Metadata, std::io::Error>,
|
||||
) -> InterpResult<'tcx, Option<FileMetadata>> {
|
||||
let metadata = match metadata {
|
||||
|
@ -5,8 +5,7 @@ use rustc_middle::mir::interpret::{alloc_range, AllocId, AllocRange};
|
||||
use rustc_span::{Span, SpanData};
|
||||
use rustc_target::abi::Size;
|
||||
|
||||
use crate::helpers::CurrentSpan;
|
||||
use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission};
|
||||
use crate::stacked_borrows::{err_sb_ub, AccessKind, GlobalStateInner, Permission, ProtectorKind};
|
||||
use crate::*;
|
||||
|
||||
use rustc_middle::mir::interpret::InterpError;
|
||||
@ -110,42 +109,29 @@ pub struct TagHistory {
|
||||
pub protected: Option<(String, SpanData)>,
|
||||
}
|
||||
|
||||
pub struct DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
|
||||
pub struct DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
|
||||
operation: Operation,
|
||||
// 'span cannot be merged with any other lifetime since they appear invariantly, under the
|
||||
// mutable ref.
|
||||
current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
|
||||
threads: &'ecx ThreadManager<'mir, 'tcx>,
|
||||
machine: &'ecx MiriMachine<'mir, 'tcx>,
|
||||
}
|
||||
|
||||
pub struct DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
|
||||
pub struct DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
|
||||
operation: Operation,
|
||||
// 'span and 'history cannot be merged, since when we call `unbuild` we need
|
||||
// to return the exact 'span that was used when calling `build`.
|
||||
current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
|
||||
threads: &'ecx ThreadManager<'mir, 'tcx>,
|
||||
machine: &'ecx MiriMachine<'mir, 'tcx>,
|
||||
history: &'history mut AllocHistory,
|
||||
offset: Size,
|
||||
}
|
||||
|
||||
impl<'span, 'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
|
||||
impl<'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
|
||||
pub fn build<'history>(
|
||||
self,
|
||||
history: &'history mut AllocHistory,
|
||||
offset: Size,
|
||||
) -> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
|
||||
DiagnosticCx {
|
||||
operation: self.operation,
|
||||
current_span: self.current_span,
|
||||
threads: self.threads,
|
||||
history,
|
||||
offset,
|
||||
}
|
||||
) -> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
|
||||
DiagnosticCx { operation: self.operation, machine: self.machine, history, offset }
|
||||
}
|
||||
|
||||
pub fn retag(
|
||||
current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
|
||||
threads: &'ecx ThreadManager<'mir, 'tcx>,
|
||||
machine: &'ecx MiriMachine<'mir, 'tcx>,
|
||||
cause: RetagCause,
|
||||
new_tag: SbTag,
|
||||
orig_tag: ProvenanceExtra,
|
||||
@ -154,46 +140,36 @@ impl<'span, 'ecx, 'mir, 'tcx> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
|
||||
let operation =
|
||||
Operation::Retag(RetagOp { cause, new_tag, orig_tag, range, permission: None });
|
||||
|
||||
DiagnosticCxBuilder { current_span, threads, operation }
|
||||
DiagnosticCxBuilder { machine, operation }
|
||||
}
|
||||
|
||||
pub fn read(
|
||||
current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
|
||||
threads: &'ecx ThreadManager<'mir, 'tcx>,
|
||||
machine: &'ecx MiriMachine<'mir, 'tcx>,
|
||||
tag: ProvenanceExtra,
|
||||
range: AllocRange,
|
||||
) -> Self {
|
||||
let operation = Operation::Access(AccessOp { kind: AccessKind::Read, tag, range });
|
||||
DiagnosticCxBuilder { current_span, threads, operation }
|
||||
DiagnosticCxBuilder { machine, operation }
|
||||
}
|
||||
|
||||
pub fn write(
|
||||
current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
|
||||
threads: &'ecx ThreadManager<'mir, 'tcx>,
|
||||
machine: &'ecx MiriMachine<'mir, 'tcx>,
|
||||
tag: ProvenanceExtra,
|
||||
range: AllocRange,
|
||||
) -> Self {
|
||||
let operation = Operation::Access(AccessOp { kind: AccessKind::Write, tag, range });
|
||||
DiagnosticCxBuilder { current_span, threads, operation }
|
||||
DiagnosticCxBuilder { machine, operation }
|
||||
}
|
||||
|
||||
pub fn dealloc(
|
||||
current_span: &'span mut CurrentSpan<'ecx, 'mir, 'tcx>,
|
||||
threads: &'ecx ThreadManager<'mir, 'tcx>,
|
||||
tag: ProvenanceExtra,
|
||||
) -> Self {
|
||||
pub fn dealloc(machine: &'ecx MiriMachine<'mir, 'tcx>, tag: ProvenanceExtra) -> Self {
|
||||
let operation = Operation::Dealloc(DeallocOp { tag });
|
||||
DiagnosticCxBuilder { current_span, threads, operation }
|
||||
DiagnosticCxBuilder { machine, operation }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
|
||||
pub fn unbuild(self) -> DiagnosticCxBuilder<'span, 'ecx, 'mir, 'tcx> {
|
||||
DiagnosticCxBuilder {
|
||||
operation: self.operation,
|
||||
current_span: self.current_span,
|
||||
threads: self.threads,
|
||||
}
|
||||
impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
|
||||
pub fn unbuild(self) -> DiagnosticCxBuilder<'ecx, 'mir, 'tcx> {
|
||||
DiagnosticCxBuilder { machine: self.machine, operation: self.operation }
|
||||
}
|
||||
}
|
||||
|
||||
@ -234,10 +210,10 @@ struct DeallocOp {
|
||||
}
|
||||
|
||||
impl AllocHistory {
|
||||
pub fn new(id: AllocId, item: Item, current_span: &mut CurrentSpan<'_, '_, '_>) -> Self {
|
||||
pub fn new(id: AllocId, item: Item, machine: &MiriMachine<'_, '_>) -> Self {
|
||||
Self {
|
||||
id,
|
||||
base: (item, current_span.get()),
|
||||
base: (item, machine.current_span()),
|
||||
creations: SmallVec::new(),
|
||||
invalidations: SmallVec::new(),
|
||||
protectors: SmallVec::new(),
|
||||
@ -245,7 +221,7 @@ impl AllocHistory {
|
||||
}
|
||||
}
|
||||
|
||||
impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir, 'tcx> {
|
||||
impl<'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'history, 'ecx, 'mir, 'tcx> {
|
||||
pub fn start_grant(&mut self, perm: Permission) {
|
||||
let Operation::Retag(op) = &mut self.operation else {
|
||||
unreachable!("start_grant must only be called during a retag, this is: {:?}", self.operation)
|
||||
@ -274,21 +250,27 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
let Operation::Retag(op) = &self.operation else {
|
||||
unreachable!("log_creation must only be called during a retag")
|
||||
};
|
||||
self.history.creations.push(Creation { retag: op.clone(), span: self.current_span.get() });
|
||||
self.history
|
||||
.creations
|
||||
.push(Creation { retag: op.clone(), span: self.machine.current_span() });
|
||||
}
|
||||
|
||||
pub fn log_invalidation(&mut self, tag: SbTag) {
|
||||
let mut span = self.current_span.get();
|
||||
let mut span = self.machine.current_span();
|
||||
let (range, cause) = match &self.operation {
|
||||
Operation::Retag(RetagOp { cause, range, permission, .. }) => {
|
||||
if *cause == RetagCause::FnEntry {
|
||||
span = self.current_span.get_caller();
|
||||
span = self.machine.caller_span();
|
||||
}
|
||||
(*range, InvalidationCause::Retag(permission.unwrap(), *cause))
|
||||
}
|
||||
Operation::Access(AccessOp { kind, range, .. }) =>
|
||||
(*range, InvalidationCause::Access(*kind)),
|
||||
_ => unreachable!("Tags can only be invalidated during a retag or access"),
|
||||
Operation::Dealloc(_) => {
|
||||
// This can be reached, but never be relevant later since the entire allocation is
|
||||
// gone now.
|
||||
return;
|
||||
}
|
||||
};
|
||||
self.history.invalidations.push(Invalidation { tag, range, span, cause });
|
||||
}
|
||||
@ -297,7 +279,9 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
let Operation::Retag(op) = &self.operation else {
|
||||
unreachable!("Protectors can only be created during a retag")
|
||||
};
|
||||
self.history.protectors.push(Protection { tag: op.new_tag, span: self.current_span.get() });
|
||||
self.history
|
||||
.protectors
|
||||
.push(Protection { tag: op.new_tag, span: self.machine.current_span() });
|
||||
}
|
||||
|
||||
pub fn get_logs_relevant_to(
|
||||
@ -369,10 +353,12 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
|
||||
/// Report a descriptive error when `new` could not be granted from `derived_from`.
|
||||
#[inline(never)] // This is only called on fatal code paths
|
||||
pub fn grant_error(&self, perm: Permission, stack: &Stack) -> InterpError<'tcx> {
|
||||
pub(super) fn grant_error(&self, stack: &Stack) -> InterpError<'tcx> {
|
||||
let Operation::Retag(op) = &self.operation else {
|
||||
unreachable!("grant_error should only be called during a retag")
|
||||
};
|
||||
let perm =
|
||||
op.permission.expect("`start_grant` must be called before calling `grant_error`");
|
||||
let action = format!(
|
||||
"trying to retag from {:?} for {:?} permission at {:?}[{:#x}]",
|
||||
op.orig_tag,
|
||||
@ -389,9 +375,12 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
|
||||
/// Report a descriptive error when `access` is not permitted based on `tag`.
|
||||
#[inline(never)] // This is only called on fatal code paths
|
||||
pub fn access_error(&self, stack: &Stack) -> InterpError<'tcx> {
|
||||
let Operation::Access(op) = &self.operation else {
|
||||
unreachable!("access_error should only be called during an access")
|
||||
pub(super) fn access_error(&self, stack: &Stack) -> InterpError<'tcx> {
|
||||
// Deallocation and retagging also do an access as part of their thing, so handle that here, too.
|
||||
let op = match &self.operation {
|
||||
Operation::Access(op) => op,
|
||||
Operation::Retag(_) => return self.grant_error(stack),
|
||||
Operation::Dealloc(_) => return self.dealloc_error(stack),
|
||||
};
|
||||
let action = format!(
|
||||
"attempting a {access} using {tag:?} at {alloc_id:?}[{offset:#x}]",
|
||||
@ -408,8 +397,13 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
}
|
||||
|
||||
#[inline(never)] // This is only called on fatal code paths
|
||||
pub fn protector_error(&self, item: &Item) -> InterpError<'tcx> {
|
||||
pub(super) fn protector_error(&self, item: &Item, kind: ProtectorKind) -> InterpError<'tcx> {
|
||||
let protected = match kind {
|
||||
ProtectorKind::WeakProtector => "weakly protected",
|
||||
ProtectorKind::StrongProtector => "strongly protected",
|
||||
};
|
||||
let call_id = self
|
||||
.machine
|
||||
.threads
|
||||
.all_stacks()
|
||||
.flatten()
|
||||
@ -422,10 +416,7 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
match self.operation {
|
||||
Operation::Dealloc(_) =>
|
||||
err_sb_ub(
|
||||
format!(
|
||||
"deallocating while item {:?} is protected by call {:?}",
|
||||
item, call_id
|
||||
),
|
||||
format!("deallocating while item {item:?} is {protected} by call {call_id:?}",),
|
||||
None,
|
||||
None,
|
||||
),
|
||||
@ -433,8 +424,7 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
| Operation::Access(AccessOp { tag, .. }) =>
|
||||
err_sb_ub(
|
||||
format!(
|
||||
"not granting access to tag {:?} because that would remove {:?} which is protected because it is an argument of call {:?}",
|
||||
tag, item, call_id
|
||||
"not granting access to tag {tag:?} because that would remove {item:?} which is {protected} because it is an argument of call {call_id:?}",
|
||||
),
|
||||
None,
|
||||
tag.and_then(|tag| self.get_logs_relevant_to(tag, Some(item.tag()))),
|
||||
@ -443,14 +433,16 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
}
|
||||
|
||||
#[inline(never)] // This is only called on fatal code paths
|
||||
pub fn dealloc_error(&self) -> InterpError<'tcx> {
|
||||
pub fn dealloc_error(&self, stack: &Stack) -> InterpError<'tcx> {
|
||||
let Operation::Dealloc(op) = &self.operation else {
|
||||
unreachable!("dealloc_error should only be called during a deallocation")
|
||||
};
|
||||
err_sb_ub(
|
||||
format!(
|
||||
"no item granting write access for deallocation to tag {:?} at {:?} found in borrow stack",
|
||||
op.tag, self.history.id,
|
||||
"attempting deallocation using {tag:?} at {alloc_id:?}{cause}",
|
||||
tag = op.tag,
|
||||
alloc_id = self.history.id,
|
||||
cause = error_cause(stack, op.tag),
|
||||
),
|
||||
None,
|
||||
op.tag.and_then(|tag| self.get_logs_relevant_to(tag, None)),
|
||||
@ -478,9 +470,7 @@ impl<'span, 'history, 'ecx, 'mir, 'tcx> DiagnosticCx<'span, 'history, 'ecx, 'mir
|
||||
Some((orig_tag, kind))
|
||||
}
|
||||
};
|
||||
self.current_span
|
||||
.machine()
|
||||
.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
|
||||
self.machine.emit_diagnostic(NonHaltingDiagnostic::PoppedPointerTag(*item, summary));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -65,7 +65,7 @@ pub struct FrameExtra {
|
||||
/// incremental updates of the global list of protected tags stored in the
|
||||
/// `stacked_borrows::GlobalState` upon function return, and if we attempt to pop a protected
|
||||
/// tag, to identify which call is responsible for protecting the tag.
|
||||
/// See `Stack::item_popped` for more explanation.
|
||||
/// See `Stack::item_invalidated` for more explanation.
|
||||
///
|
||||
/// This will contain one tag per reference passed to the function, so
|
||||
/// a size of 2 is enough for the vast majority of functions.
|
||||
@ -91,6 +91,26 @@ pub struct Stacks {
|
||||
modified_since_last_gc: bool,
|
||||
}
|
||||
|
||||
/// The flavor of the protector.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
enum ProtectorKind {
|
||||
/// Protected against aliasing violations from other pointers.
|
||||
///
|
||||
/// Items protected like this cause UB when they are invalidated, *but* the pointer itself may
|
||||
/// still be used to issue a deallocation.
|
||||
///
|
||||
/// This is required for LLVM IR pointers that are `noalias` but *not* `dereferenceable`.
|
||||
WeakProtector,
|
||||
|
||||
/// Protected against any kind of invalidation.
|
||||
///
|
||||
/// Items protected like this cause UB when they are invalidated or the memory is deallocated.
|
||||
/// This is strictly stronger protection than `WeakProtector`.
|
||||
///
|
||||
/// This is required for LLVM IR pointers that are `dereferenceable` (and also allows `noalias`).
|
||||
StrongProtector,
|
||||
}
|
||||
|
||||
/// Extra global state, available to the memory access hooks.
|
||||
#[derive(Debug)]
|
||||
pub struct GlobalStateInner {
|
||||
@ -102,12 +122,12 @@ pub struct GlobalStateInner {
|
||||
base_ptr_tags: FxHashMap<AllocId, SbTag>,
|
||||
/// Next unused call ID (for protectors).
|
||||
next_call_id: CallId,
|
||||
/// All currently protected tags.
|
||||
/// All currently protected tags, and the status of their protection.
|
||||
/// An item is protected if its tag is in this set, *and* it has the "protected" bit set.
|
||||
/// We add tags to this when they are created with a protector in `reborrow`, and
|
||||
/// we remove tags from this when the call which is protecting them returns, in
|
||||
/// `GlobalStateInner::end_call`. See `Stack::item_popped` for more details.
|
||||
protected_tags: FxHashSet<SbTag>,
|
||||
/// `GlobalStateInner::end_call`. See `Stack::item_invalidated` for more details.
|
||||
protected_tags: FxHashMap<SbTag, ProtectorKind>,
|
||||
/// The pointer ids to trace
|
||||
tracked_pointer_tags: FxHashSet<SbTag>,
|
||||
/// The call ids to trace
|
||||
@ -189,7 +209,7 @@ impl GlobalStateInner {
|
||||
next_ptr_tag: SbTag(NonZeroU64::new(1).unwrap()),
|
||||
base_ptr_tags: FxHashMap::default(),
|
||||
next_call_id: NonZeroU64::new(1).unwrap(),
|
||||
protected_tags: FxHashSet::default(),
|
||||
protected_tags: FxHashMap::default(),
|
||||
tracked_pointer_tags,
|
||||
tracked_call_ids,
|
||||
retag_fields,
|
||||
@ -272,6 +292,13 @@ impl Permission {
|
||||
}
|
||||
}
|
||||
|
||||
/// Determines whether an item was invalidated by a conflicting access, or by deallocation.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
enum ItemInvalidationCause {
|
||||
Conflict,
|
||||
Dealloc,
|
||||
}
|
||||
|
||||
/// Core per-location operations: access, dealloc, reborrow.
|
||||
impl<'tcx> Stack {
|
||||
/// Find the first write-incompatible item above the given one --
|
||||
@ -310,10 +337,11 @@ impl<'tcx> Stack {
|
||||
/// Within `provoking_access, the `AllocRange` refers the entire operation, and
|
||||
/// the `Size` refers to the specific location in the `AllocRange` that we are
|
||||
/// currently checking.
|
||||
fn item_popped(
|
||||
fn item_invalidated(
|
||||
item: &Item,
|
||||
global: &GlobalStateInner,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
|
||||
cause: ItemInvalidationCause,
|
||||
) -> InterpResult<'tcx> {
|
||||
if !global.tracked_pointer_tags.is_empty() {
|
||||
dcx.check_tracked_tag_popped(item, global);
|
||||
@ -336,8 +364,14 @@ impl<'tcx> Stack {
|
||||
// 2. Most frames protect only one or two tags. So this duplicative global turns a search
|
||||
// which ends up about linear in the number of protected tags in the program into a
|
||||
// constant time check (and a slow linear, because the tags in the frames aren't contiguous).
|
||||
if global.protected_tags.contains(&item.tag()) {
|
||||
return Err(dcx.protector_error(item).into());
|
||||
if let Some(&protector_kind) = global.protected_tags.get(&item.tag()) {
|
||||
// The only way this is okay is if the protector is weak and we are deallocating with
|
||||
// the right pointer.
|
||||
let allowed = matches!(cause, ItemInvalidationCause::Dealloc)
|
||||
&& matches!(protector_kind, ProtectorKind::WeakProtector);
|
||||
if !allowed {
|
||||
return Err(dcx.protector_error(item, protector_kind).into());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@ -350,15 +384,15 @@ impl<'tcx> Stack {
|
||||
&mut self,
|
||||
access: AccessKind,
|
||||
tag: ProvenanceExtra,
|
||||
global: &mut GlobalStateInner,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
|
||||
global: &GlobalStateInner,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
|
||||
exposed_tags: &FxHashSet<SbTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// Two main steps: Find granting item, remove incompatible items above.
|
||||
|
||||
// Step 1: Find granting item.
|
||||
let granting_idx =
|
||||
self.find_granting(access, tag, exposed_tags).map_err(|_| dcx.access_error(self))?;
|
||||
self.find_granting(access, tag, exposed_tags).map_err(|()| dcx.access_error(self))?;
|
||||
|
||||
// Step 2: Remove incompatible items above them. Make sure we do not remove protected
|
||||
// items. Behavior differs for reads and writes.
|
||||
@ -377,7 +411,7 @@ impl<'tcx> Stack {
|
||||
0
|
||||
};
|
||||
self.pop_items_after(first_incompatible_idx, |item| {
|
||||
Stack::item_popped(&item, global, dcx)?;
|
||||
Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
|
||||
dcx.log_invalidation(item.tag());
|
||||
Ok(())
|
||||
})?;
|
||||
@ -398,7 +432,7 @@ impl<'tcx> Stack {
|
||||
0
|
||||
};
|
||||
self.disable_uniques_starting_at(first_incompatible_idx, |item| {
|
||||
Stack::item_popped(&item, global, dcx)?;
|
||||
Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Conflict)?;
|
||||
dcx.log_invalidation(item.tag());
|
||||
Ok(())
|
||||
})?;
|
||||
@ -437,56 +471,59 @@ impl<'tcx> Stack {
|
||||
&mut self,
|
||||
tag: ProvenanceExtra,
|
||||
global: &GlobalStateInner,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
|
||||
exposed_tags: &FxHashSet<SbTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// Step 1: Make sure there is a granting item.
|
||||
self.find_granting(AccessKind::Write, tag, exposed_tags)
|
||||
.map_err(|_| dcx.dealloc_error())?;
|
||||
// Step 1: Make a write access.
|
||||
// As part of this we do regular protector checking, i.e. even weakly protected items cause UB when popped.
|
||||
self.access(AccessKind::Write, tag, global, dcx, exposed_tags)?;
|
||||
|
||||
// Step 2: Consider all items removed. This checks for protectors.
|
||||
// Step 2: Pretend we remove the remaining items, checking if any are strongly protected.
|
||||
for idx in (0..self.len()).rev() {
|
||||
let item = self.get(idx).unwrap();
|
||||
Stack::item_popped(&item, global, dcx)?;
|
||||
Stack::item_invalidated(&item, global, dcx, ItemInvalidationCause::Dealloc)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Derive a new pointer from one with the given tag.
|
||||
/// `weak` controls whether this operation is weak or strong: weak granting does not act as
|
||||
/// an access, and they add the new item directly on top of the one it is derived
|
||||
/// from instead of all the way at the top of the stack.
|
||||
/// `range` refers the entire operation, and `offset` refers to the specific location in
|
||||
/// `range` that we are currently checking.
|
||||
///
|
||||
/// `access` indicates which kind of memory access this retag itself should correspond to.
|
||||
fn grant(
|
||||
&mut self,
|
||||
derived_from: ProvenanceExtra,
|
||||
new: Item,
|
||||
global: &mut GlobalStateInner,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
|
||||
access: Option<AccessKind>,
|
||||
global: &GlobalStateInner,
|
||||
dcx: &mut DiagnosticCx<'_, '_, '_, 'tcx>,
|
||||
exposed_tags: &FxHashSet<SbTag>,
|
||||
) -> InterpResult<'tcx> {
dcx.start_grant(new.perm());

// Figure out which access `perm` corresponds to.
let access =
if new.perm().grants(AccessKind::Write) { AccessKind::Write } else { AccessKind::Read };

// Now we figure out which item grants our parent (`derived_from`) this kind of access.
// We use that to determine where to put the new item.
let granting_idx = self
.find_granting(access, derived_from, exposed_tags)
.map_err(|_| dcx.grant_error(new.perm(), self))?;

// Compute where to put the new item.
// Either way, we ensure that we insert the new item in a way such that between
// `derived_from` and the new one, there are only items *compatible with* `derived_from`.
let new_idx = if new.perm() == Permission::SharedReadWrite {
assert!(
access == AccessKind::Write,
"this case only makes sense for stack-like accesses"
);
let new_idx = if let Some(access) = access {
// Simple case: We are just a regular memory access, and then push our thing on top,
// like a regular stack.
// This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
self.access(access, derived_from, global, dcx, exposed_tags)?;

// We insert "as far up as possible": We know only compatible items are remaining
// on top of `derived_from`, and we want the new item at the top so that we
// get the strongest possible guarantees.
// This ensures U1 and F1.
self.len()
} else {
// The tricky case: creating a new SRW permission without actually being an access.
assert!(new.perm() == Permission::SharedReadWrite);

// First we figure out which item grants our parent (`derived_from`) this kind of access.
// We use that to determine where to put the new item.
let granting_idx = self
.find_granting(AccessKind::Write, derived_from, exposed_tags)
.map_err(|()| dcx.grant_error(self))?;

let (Some(granting_idx), ProvenanceExtra::Concrete(_)) = (granting_idx, derived_from) else {
// The parent is a wildcard pointer or matched the unknown bottom.
@ -503,17 +540,6 @@ impl<'tcx> Stack {
// be popped to (i.e., we insert it above all the write-compatible items).
// This ensures F2b by adding the new item below any potentially existing `SharedReadOnly`.
self.find_first_write_incompatible(granting_idx)
} else {
// A "safe" reborrow for a pointer that actually expects some aliasing guarantees.
// Here, creating a reference actually counts as an access.
// This ensures F2b for `Unique`, by removing offending `SharedReadOnly`.
self.access(access, derived_from, global, dcx, exposed_tags)?;

// We insert "as far up as possible": We know only compatible items are remaining
// on top of `derived_from`, and we want the new item at the top so that we
// get the strongest possible guarantees.
// This ensures U1 and F1.
self.len()
};

// Put the new item there.
@ -555,14 +581,14 @@ impl<'tcx> Stacks {
perm: Permission,
tag: SbTag,
id: AllocId,
current_span: &mut CurrentSpan<'_, '_, '_>,
machine: &MiriMachine<'_, '_>,
) -> Self {
let item = Item::new(tag, perm, false);
let stack = Stack::new(item);

Stacks {
stacks: RangeMap::new(size, stack),
history: AllocHistory::new(id, item, current_span),
history: AllocHistory::new(id, item, machine),
exposed_tags: FxHashSet::default(),
modified_since_last_gc: false,
}
@ -572,10 +598,10 @@ impl<'tcx> Stacks {
fn for_each(
&mut self,
range: AllocRange,
mut dcx_builder: DiagnosticCxBuilder<'_, '_, '_, 'tcx>,
mut dcx_builder: DiagnosticCxBuilder<'_, '_, 'tcx>,
mut f: impl FnMut(
&mut Stack,
&mut DiagnosticCx<'_, '_, '_, '_, 'tcx>,
&mut DiagnosticCx<'_, '_, '_, 'tcx>,
&mut FxHashSet<SbTag>,
) -> InterpResult<'tcx>,
) -> InterpResult<'tcx> {
@ -596,7 +622,7 @@ impl Stacks {
size: Size,
state: &GlobalState,
kind: MemoryKind<MiriMemoryKind>,
mut current_span: CurrentSpan<'_, '_, '_>,
machine: &MiriMachine<'_, '_>,
) -> Self {
let mut extra = state.borrow_mut();
let (base_tag, perm) = match kind {
@ -605,12 +631,11 @@ impl Stacks {
// not through a pointer). That is, whenever we directly write to a local, this will pop
// everything else off the stack, invalidating all previous pointers,
// and in particular, *all* raw pointers.
MemoryKind::Stack =>
(extra.base_ptr_tag(id, current_span.machine()), Permission::Unique),
MemoryKind::Stack => (extra.base_ptr_tag(id, machine), Permission::Unique),
// Everything else is shared by default.
_ => (extra.base_ptr_tag(id, current_span.machine()), Permission::SharedReadWrite),
_ => (extra.base_ptr_tag(id, machine), Permission::SharedReadWrite),
};
Stacks::new(size, perm, base_tag, id, &mut current_span)
Stacks::new(size, perm, base_tag, id, machine)
}

#[inline(always)]
@ -619,9 +644,7 @@ impl Stacks {
alloc_id: AllocId,
tag: ProvenanceExtra,
range: AllocRange,
state: &GlobalState,
mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
threads: &'ecx ThreadManager<'mir, 'tcx>,
machine: &'ecx MiriMachine<'mir, 'tcx>,
) -> InterpResult<'tcx>
where
'tcx: 'ecx,
@ -632,22 +655,20 @@ impl Stacks {
Pointer::new(alloc_id, range.start),
range.size.bytes()
);
let dcx = DiagnosticCxBuilder::read(&mut current_span, threads, tag, range);
let mut state = state.borrow_mut();
let dcx = DiagnosticCxBuilder::read(machine, tag, range);
let state = machine.stacked_borrows.as_ref().unwrap().borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.access(AccessKind::Read, tag, &mut state, dcx, exposed_tags)
stack.access(AccessKind::Read, tag, &state, dcx, exposed_tags)
})
}

#[inline(always)]
pub fn before_memory_write<'tcx, 'mir, 'ecx>(
pub fn before_memory_write<'tcx>(
&mut self,
alloc_id: AllocId,
tag: ProvenanceExtra,
range: AllocRange,
state: &GlobalState,
mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
threads: &'ecx ThreadManager<'mir, 'tcx>,
machine: &mut MiriMachine<'_, 'tcx>,
) -> InterpResult<'tcx> {
trace!(
"write access with tag {:?}: {:?}, size {}",
@ -655,26 +676,24 @@ impl Stacks {
Pointer::new(alloc_id, range.start),
range.size.bytes()
);
let dcx = DiagnosticCxBuilder::write(&mut current_span, threads, tag, range);
let mut state = state.borrow_mut();
let dcx = DiagnosticCxBuilder::write(machine, tag, range);
let state = machine.stacked_borrows.as_ref().unwrap().borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.access(AccessKind::Write, tag, &mut state, dcx, exposed_tags)
stack.access(AccessKind::Write, tag, &state, dcx, exposed_tags)
})
}

#[inline(always)]
pub fn before_memory_deallocation<'tcx, 'mir, 'ecx>(
pub fn before_memory_deallocation<'tcx>(
&mut self,
alloc_id: AllocId,
tag: ProvenanceExtra,
range: AllocRange,
state: &GlobalState,
mut current_span: CurrentSpan<'ecx, 'mir, 'tcx>,
threads: &'ecx ThreadManager<'mir, 'tcx>,
machine: &mut MiriMachine<'_, 'tcx>,
) -> InterpResult<'tcx> {
trace!("deallocation with tag {:?}: {:?}, size {}", tag, alloc_id, range.size.bytes());
let dcx = DiagnosticCxBuilder::dealloc(&mut current_span, threads, tag);
let state = state.borrow();
let dcx = DiagnosticCxBuilder::dealloc(machine, tag);
let state = machine.stacked_borrows.as_ref().unwrap().borrow();
self.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.dealloc(tag, &state, dcx, exposed_tags)
})?;
@ -698,7 +717,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
kind: RefKind,
retag_cause: RetagCause, // What caused this retag, for diagnostics only
new_tag: SbTag,
protect: bool,
protect: Option<ProtectorKind>,
) -> InterpResult<'tcx, Option<AllocId>> {
let this = self.eval_context_mut();

@ -738,7 +757,6 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
let (_size, _align, alloc_kind) = this.get_alloc_info(alloc_id);
match alloc_kind {
AllocKind::LiveData => {
let current_span = &mut this.machine.current_span();
// This should have alloc_extra data, but `get_alloc_extra` can still fail
// if converting this alloc_id from a global to a local one
// uncovers a non-supported `extern static`.
@ -748,12 +766,10 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
.as_ref()
.expect("we should have Stacked Borrows data")
.borrow_mut();
let threads = &this.machine.threads;
// Note that we create a *second* `DiagnosticCxBuilder` below for the actual retag.
// FIXME: can this be done cleaner?
let dcx = DiagnosticCxBuilder::retag(
current_span,
threads,
&this.machine,
retag_cause,
new_tag,
orig_tag,
@ -761,7 +777,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
);
let mut dcx = dcx.build(&mut stacked_borrows.history, base_offset);
dcx.log_creation();
if protect {
if protect.is_some() {
dcx.log_protector();
}
}
@ -821,70 +837,89 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
size.bytes()
);

if protect {
// See comment in `Stack::item_popped` for why we store the tag twice.
if let Some(protect) = protect {
// See comment in `Stack::item_invalidated` for why we store the tag twice.
this.frame_mut().extra.stacked_borrows.as_mut().unwrap().protected_tags.push(new_tag);
this.machine.stacked_borrows.as_mut().unwrap().get_mut().protected_tags.insert(new_tag);
this.machine
.stacked_borrows
.as_mut()
.unwrap()
.get_mut()
.protected_tags
.insert(new_tag, protect);
}

// Update the stacks.
// Make sure that raw pointers and mutable shared references are reborrowed "weak":
// There could be existing unique pointers reborrowed from them that should remain valid!
let perm = match kind {
RefKind::Unique { two_phase: false }
if place.layout.ty.is_unpin(*this.tcx, this.param_env()) =>
{
// Only if the type is unpin do we actually enforce uniqueness
Permission::Unique
let (perm, access) = match kind {
RefKind::Unique { two_phase } => {
// Permission is Unique only if the type is `Unpin` and this is not a two-phase borrow
let perm = if !two_phase && place.layout.ty.is_unpin(*this.tcx, this.param_env()) {
Permission::Unique
} else {
Permission::SharedReadWrite
};
// We do an access for all full borrows, even if `!Unpin`.
let access = if !two_phase { Some(AccessKind::Write) } else { None };
(perm, access)
}
RefKind::Unique { .. } => {
// Two-phase references and !Unpin references are treated as SharedReadWrite
Permission::SharedReadWrite
RefKind::Raw { mutable: true } => {
// Creating a raw ptr does not count as an access
(Permission::SharedReadWrite, None)
}
RefKind::Raw { mutable: true } => Permission::SharedReadWrite,
RefKind::Shared | RefKind::Raw { mutable: false } => {
// Shared references and *const are a whole different kind of game, the
// permission is not uniform across the entire range!
// We need a frozen-sensitive reborrow.
// We have to use shared references to alloc/memory_extra here since
// `visit_freeze_sensitive` needs to access the global state.
let extra = this.get_alloc_extra(alloc_id)?;
let mut stacked_borrows = extra
let alloc_extra = this.get_alloc_extra(alloc_id)?;
let mut stacked_borrows = alloc_extra
.stacked_borrows
.as_ref()
.expect("we should have Stacked Borrows data")
.borrow_mut();
// FIXME: can't share this with the current_span inside log_creation
let mut current_span = this.machine.current_span();
this.visit_freeze_sensitive(place, size, |mut range, frozen| {
// Adjust range.
range.start += base_offset;
// We are only ever `SharedReadOnly` inside the frozen bits.
let perm = if frozen {
Permission::SharedReadOnly
let (perm, access) = if frozen {
(Permission::SharedReadOnly, Some(AccessKind::Read))
} else {
Permission::SharedReadWrite
// Inside UnsafeCell, this does *not* count as an access, as there
// might actually be mutable references further up the stack that
// we have to keep alive.
(Permission::SharedReadWrite, None)
};
let protected = if frozen {
protect
protect.is_some()
} else {
// We do not protect inside UnsafeCell.
// This fixes https://github.com/rust-lang/rust/issues/55005.
false
};
let item = Item::new(new_tag, perm, protected);
let mut global = this.machine.stacked_borrows.as_ref().unwrap().borrow_mut();
let global = this.machine.stacked_borrows.as_ref().unwrap().borrow();
let dcx = DiagnosticCxBuilder::retag(
&mut current_span, // FIXME avoid this `clone`
&this.machine.threads,
&this.machine,
retag_cause,
new_tag,
orig_tag,
alloc_range(base_offset, size),
);
stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.grant(orig_tag, item, &mut global, dcx, exposed_tags)
})
stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
})?;
drop(global);
if let Some(access) = access {
assert_eq!(access, AccessKind::Read);
// Make sure the data race model also knows about this.
if let Some(data_race) = alloc_extra.data_race.as_ref() {
data_race.read(alloc_id, range, &this.machine)?;
}
}
Ok(())
})?;
return Ok(Some(alloc_id));
}
@ -894,27 +929,32 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
// Note that this asserts that the allocation is mutable -- but since we are creating a
// mutable pointer, that seems reasonable.
let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc_id)?;
let mut stacked_borrows = alloc_extra
let stacked_borrows = alloc_extra
.stacked_borrows
.as_mut()
.expect("we should have Stacked Borrows data")
.borrow_mut();
let item = Item::new(new_tag, perm, protect);
.get_mut();
let item = Item::new(new_tag, perm, protect.is_some());
let range = alloc_range(base_offset, size);
let mut global = machine.stacked_borrows.as_ref().unwrap().borrow_mut();
// FIXME: can't share this with the current_span inside log_creation
let current_span = &mut machine.current_span();
let global = machine.stacked_borrows.as_ref().unwrap().borrow();
let dcx = DiagnosticCxBuilder::retag(
current_span,
&machine.threads,
machine,
retag_cause,
new_tag,
orig_tag,
alloc_range(base_offset, size),
);
stacked_borrows.for_each(range, dcx, |stack, dcx, exposed_tags| {
stack.grant(orig_tag, item, &mut global, dcx, exposed_tags)
stack.grant(orig_tag, item, access, &global, dcx, exposed_tags)
})?;
drop(global);
if let Some(access) = access {
assert_eq!(access, AccessKind::Write);
// Make sure the data race model also knows about this.
if let Some(data_race) = alloc_extra.data_race.as_mut() {
data_race.write(alloc_id, range, machine)?;
}
}

Ok(Some(alloc_id))
}
@ -926,7 +966,7 @@ trait EvalContextPrivExt<'mir: 'ecx, 'tcx: 'mir, 'ecx>: crate::MiriInterpCxExt<'
val: &ImmTy<'tcx, Provenance>,
kind: RefKind,
retag_cause: RetagCause, // What caused this retag, for diagnostics only
protect: bool,
protect: Option<ProtectorKind>,
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
let this = self.eval_context_mut();
// We want a place for where the ptr *points to*, so we get one.
@ -996,7 +1036,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
place: &PlaceTy<'tcx, Provenance>,
ref_kind: RefKind,
retag_cause: RetagCause,
protector: bool,
protector: Option<ProtectorKind>,
) -> InterpResult<'tcx> {
let val = self.ecx.read_immediate(&self.ecx.place_to_op(place)?)?;
let val = self.ecx.retag_reference(&val, ref_kind, retag_cause, protector)?;
@ -1015,13 +1055,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}

fn visit_box(&mut self, place: &PlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
// Boxes do not get a protector: protectors reflect that references outlive the call
// they were passed in to; that's just not the case for boxes.
// Boxes get a weak protector, since they may be deallocated.
self.retag_place(
place,
RefKind::Unique { two_phase: false },
self.retag_cause,
/*protector*/ false,
/*protector*/
(self.kind == RetagKind::FnEntry).then_some(ProtectorKind::WeakProtector),
)
}
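A hedged, user-level illustration (not part of the patch) of why the Box protector is only weak: the callee is allowed to deallocate the Box it was handed, which a strong protector would reject.

```rust
// Dropping a Box argument frees its allocation while the argument's tag is
// still (weakly) protected; this is exactly the case a weak protector permits.
fn consume(b: Box<i32>) {
    drop(b); // deallocation during the call -- fine under a weak protector
}

fn main() {
    consume(Box::new(0));
}
```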

@ -1046,7 +1086,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
place,
ref_kind,
self.retag_cause,
/*protector*/ self.kind == RetagKind::FnEntry,
/*protector*/
(self.kind == RetagKind::FnEntry)
.then_some(ProtectorKind::StrongProtector),
)?;
}
ty::RawPtr(tym) => {
@ -1059,7 +1101,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
place,
RefKind::Raw { mutable: tym.mutbl == Mutability::Mut },
self.retag_cause,
/*protector*/ false,
/*protector*/ None,
)?;
}
}
@ -1110,12 +1152,12 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
// (The pointer type does not matter, so we use a raw pointer.)
let ptr_layout = this.layout_of(this.tcx.mk_mut_ptr(return_place.layout.ty))?;
let val = ImmTy::from_immediate(return_place.to_ref(this), ptr_layout);
// Reborrow it.
// Reborrow it. With protection! That is part of the point.
let val = this.retag_reference(
&val,
RefKind::Unique { two_phase: false },
RetagCause::FnReturn,
/*protector*/ true,
/*protector*/ Some(ProtectorKind::StrongProtector),
)?;
// And use reborrowed pointer for return place.
let return_place = this.ref_to_mplace(&val)?;
@ -367,10 +367,10 @@ impl<'tcx> Stack {

/// Find all `Unique` elements in this borrow stack above `granting_idx`, pass a copy of them
/// to the `visitor`, then set their `Permission` to `Disabled`.
pub fn disable_uniques_starting_at<V: FnMut(Item) -> crate::InterpResult<'tcx>>(
pub fn disable_uniques_starting_at(
&mut self,
disable_start: usize,
mut visitor: V,
mut visitor: impl FnMut(Item) -> crate::InterpResult<'tcx>,
) -> crate::InterpResult<'tcx> {
#[cfg(feature = "stack-cache")]
let unique_range = self.unique_range.clone();
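The signature change above only moves the closure bound from a named type parameter into argument-position `impl Trait`; a minimal illustration of the equivalence (hypothetical function names, not from the patch):

```rust
// Both forms accept the same callers; `impl Trait` just avoids naming the
// type parameter (at the cost of no longer being specifiable via turbofish).
fn for_each_generic<V: FnMut(u32) -> Result<(), String>>(mut visitor: V) -> Result<(), String> {
    visitor(1)
}

fn for_each_impl(mut visitor: impl FnMut(u32) -> Result<(), String>) -> Result<(), String> {
    visitor(1)
}

fn main() -> Result<(), String> {
    for_each_generic(|x| {
        println!("generic: {x}");
        Ok(())
    })?;
    for_each_impl(|x| {
        println!("impl Trait: {x}");
        Ok(())
    })
}
```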

@ -1,5 +1,6 @@
[workspace]
members = ["subcrate", "issue-1567", "exported-symbol-dep"]
exclude = ["no-std-smoke"] # it wants to be panic="abort"

[package]
name = "cargo-miri-test"
src/tools/miri/test-cargo-miri/no-std-smoke/Cargo.lock (new file, 7 lines)
@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3

[[package]]
name = "no-std-smoke"
version = "0.1.0"
src/tools/miri/test-cargo-miri/no-std-smoke/Cargo.toml (new file, 14 lines)
@ -0,0 +1,14 @@
[package]
name = "no-std-smoke"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

[profile.dev]
panic = 'abort'

[profile.release]
panic = 'abort'
src/tools/miri/test-cargo-miri/no-std-smoke/src/main.rs (new file, 34 lines)
@ -0,0 +1,34 @@
// Copied from tests/pass/no-std.rs

#![feature(start)]
#![no_std]

// Plumbing to let us use `writeln!` to host stdout:

extern "Rust" {
fn miri_write_to_stdout(bytes: &[u8]);
}

struct Host;

use core::fmt::Write;

impl Write for Host {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
unsafe {
miri_write_to_stdout(s.as_bytes());
}
Ok(())
}
}

#[start]
fn start(_: isize, _: *const *const u8) -> isize {
writeln!(Host, "hello, world!").unwrap();
0
}

#[panic_handler]
fn panic_handler(_: &core::panic::PanicInfo) -> ! {
loop {}
}
@ -16,9 +16,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||
|
||||
[[package]]
|
||||
name = "bytes"
|
||||
version = "1.2.1"
|
||||
version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db"
|
||||
checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
@ -39,9 +39,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "getrandom"
|
||||
version = "0.2.7"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
|
||||
checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
@ -59,9 +59,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.133"
|
||||
version = "0.2.137"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c0f80d65747a3e43d1596c7c5492d95d5edddaabd45a7fcdb02b95f644164966"
|
||||
checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
@ -90,9 +90,9 @@ checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "0.8.4"
|
||||
version = "0.8.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf"
|
||||
checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
@ -105,7 +105,7 @@ name = "miri-test-deps"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"getrandom 0.1.16",
|
||||
"getrandom 0.2.7",
|
||||
"getrandom 0.2.8",
|
||||
"libc",
|
||||
"num_cpus",
|
||||
"page_size",
|
||||
@ -115,9 +115,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "num_cpus"
|
||||
version = "1.13.1"
|
||||
version = "1.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
|
||||
checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
@ -125,9 +125,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "page_size"
|
||||
version = "0.4.2"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd"
|
||||
checksum = "1b7663cbd190cfd818d08efa8497f6cd383076688c49a391ef7c0d03cd12b561"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi",
|
||||
@ -145,9 +145,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "parking_lot_core"
|
||||
version = "0.9.3"
|
||||
version = "0.9.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929"
|
||||
checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"libc",
|
||||
@ -164,15 +164,15 @@ checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
|
||||
|
||||
[[package]]
|
||||
name = "ppv-lite86"
|
||||
version = "0.2.16"
|
||||
version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
|
||||
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.45"
|
||||
version = "1.0.47"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3edcd08cf4fea98d1ae6c9ddd3b8ccb1acac7c3693d62625969a7daa04a2ae36"
|
||||
checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
@ -213,7 +213,7 @@ version = "0.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
|
||||
dependencies = [
|
||||
"getrandom 0.2.7",
|
||||
"getrandom 0.2.8",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -242,9 +242,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "smallvec"
|
||||
version = "1.9.0"
|
||||
version = "1.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
|
||||
checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
@ -258,9 +258,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.101"
|
||||
version = "1.0.103"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e90cde112c4b9690b8cbe810cba9ddd8bc1d7472e2cae317b69e9438c1cba7d2"
|
||||
checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -269,9 +269,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.21.2"
|
||||
version = "1.22.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099"
|
||||
checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"bytes",
|
||||
@ -300,9 +300,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.4"
|
||||
version = "1.0.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcc811dc4066ac62f84f11307873c4850cb653bfa9b1719cee2bd2204a4bc5dd"
|
||||
checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3"
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
@ -340,43 +340,57 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.36.1"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2"
|
||||
checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm",
|
||||
"windows_aarch64_msvc",
|
||||
"windows_i686_gnu",
|
||||
"windows_i686_msvc",
|
||||
"windows_x86_64_gnu",
|
||||
"windows_x86_64_gnullvm",
|
||||
"windows_x86_64_msvc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.36.1"
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47"
|
||||
checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.36.1"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6"
|
||||
checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.36.1"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024"
|
||||
checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.36.1"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1"
|
||||
checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.36.1"
|
||||
version = "0.42.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680"
|
||||
checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5"
|
||||
|
@ -11,11 +11,11 @@ edition = "2021"
# all dependencies (and their transitive ones) listed here can be used in `tests/`.
tokio = { version = "1.0", features = ["full"] }
libc = "0.2"
page_size = "0.4.1"
page_size = "0.5"
num_cpus = "1.10.1"

getrandom_1 = { package = "getrandom", version = "0.1" }
getrandom_2 = { package = "getrandom", version = "0.2" }
getrandom = { version = "0.2" }
rand = { version = "0.8", features = ["small_rng"] }

[workspace]
@ -1,8 +1,8 @@
error: unsupported operation: can't call foreign function: epoll_create1
--> CARGO_REGISTRY/.../epoll.rs:LL:CC
|
LL | syscall!(epoll_create1(flag)).map(|ep| Selector {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ can't call foreign function: epoll_create1
LL | let res = syscall!(epoll_create1(libc::EPOLL_CLOEXEC));
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ can't call foreign function: epoll_create1
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that the interpreter does not support
= note: BACKTRACE:
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
#![feature(new_uninit)]
|
||||
|
||||
use std::mem::MaybeUninit;
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
#![feature(new_uninit)]
|
||||
|
||||
use std::ptr::null_mut;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::spawn;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::spawn;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::spawn;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::mem;
|
||||
use std::thread::{sleep, spawn};
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::mem;
|
||||
use std::thread::{sleep, spawn};
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::ptr::null_mut;
|
||||
use std::sync::atomic::{AtomicPtr, Ordering};
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::ptr::null_mut;
|
||||
use std::sync::atomic::{AtomicPtr, Ordering};
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
use std::sync::atomic::{fence, AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread;
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
// We want to control preemption here. Stacked borrows interferes by having its own accesses.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmir-opt-level=0 -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmir-opt-level=0 -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
// Note: mir-opt-level set to 0 to prevent the read of stack_var in thread 1
|
||||
// from being optimized away and preventing the detection of the data-race.
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::spawn;
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::{sleep, spawn};
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::spawn;
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::thread::spawn;
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
use std::thread;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
|
@ -1,5 +1,5 @@
|
||||
// We want to control preemption here.
|
||||
//@compile-flags: -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::thread::spawn;
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@compile-flags: -Zmiri-disable-isolation -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0
|
||||
//@compile-flags: -Zmiri-disable-weak-memory-emulation -Zmiri-preemption-rate=0 -Zmiri-disable-stacked-borrows
|
||||
|
||||
use std::ptr::null_mut;
|
||||
use std::sync::atomic::{AtomicPtr, Ordering};
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/aliasing_mut1.rs:LL:CC
|
||||
|
|
||||
LL | pub fn safe(_x: &mut i32, _y: &mut i32) {}
|
||||
| ^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/aliasing_mut2.rs:LL:CC
|
||||
|
|
||||
LL | pub fn safe(_x: &i32, _y: &mut i32) {}
|
||||
| ^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/aliasing_mut4.rs:LL:CC
|
||||
|
|
||||
LL | pub fn safe(_x: &i32, _y: &mut Cell<i32>) {}
|
||||
| ^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -0,0 +1,14 @@
unsafe fn test(mut x: Box<i32>, y: *const i32) -> i32 {
// We will call this in a way that x and y alias.
*x = 5;
std::mem::forget(x);
*y //~ERROR: weakly protected
}

fn main() {
unsafe {
let mut v = 42;
let ptr = &mut v as *mut i32;
test(Box::from_raw(ptr), ptr);
}
}
@ -0,0 +1,30 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is weakly protected because it is an argument of call ID
|
||||
--> $DIR/box_noalias_violation.rs:LL:CC
|
||||
|
|
||||
LL | *y
|
||||
| ^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is weakly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
help: <TAG> was created by a SharedReadWrite retag at offsets [0x0..0x4]
|
||||
--> $DIR/box_noalias_violation.rs:LL:CC
|
||||
|
|
||||
LL | let ptr = &mut v as *mut i32;
|
||||
| ^^^^^^
|
||||
help: <TAG> is this argument
|
||||
--> $DIR/box_noalias_violation.rs:LL:CC
|
||||
|
|
||||
LL | unsafe fn test(mut x: Box<i32>, y: *const i32) -> i32 {
|
||||
| ^^^^^
|
||||
= note: BACKTRACE:
|
||||
= note: inside `test` at $DIR/box_noalias_violation.rs:LL:CC
|
||||
note: inside `main` at $DIR/box_noalias_violation.rs:LL:CC
|
||||
--> $DIR/box_noalias_violation.rs:LL:CC
|
||||
|
|
||||
LL | test(Box::from_raw(ptr), ptr);
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
|
||||
|
||||
error: aborting due to previous error
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@error-pattern: /deallocating while item \[Unique for .*\] is protected/
|
||||
//@error-pattern: /deallocating while item \[Unique for .*\] is strongly protected/
|
||||
|
||||
fn inner(x: &mut i32, f: fn(&mut i32)) {
|
||||
// `f` may mutate, but it may not deallocate!
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: deallocating while item [Unique for <TAG>] is protected by call ID
|
||||
error: Undefined Behavior: deallocating while item [Unique for <TAG>] is strongly protected by call ID
|
||||
--> RUSTLIB/alloc/src/alloc.rs:LL:CC
|
||||
|
|
||||
LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item [Unique for <TAG>] is protected by call ID
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item [Unique for <TAG>] is strongly protected by call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -1,4 +1,4 @@
|
||||
//@error-pattern: /deallocating while item \[SharedReadWrite for .*\] is protected/
|
||||
//@error-pattern: /deallocating while item \[SharedReadWrite for .*\] is strongly protected/
|
||||
use std::marker::PhantomPinned;
|
||||
|
||||
pub struct NotUnpin(i32, PhantomPinned);
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: deallocating while item [SharedReadWrite for <TAG>] is protected by call ID
|
||||
error: Undefined Behavior: deallocating while item [SharedReadWrite for <TAG>] is strongly protected by call ID
|
||||
--> RUSTLIB/alloc/src/alloc.rs:LL:CC
|
||||
|
|
||||
LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item [SharedReadWrite for <TAG>] is protected by call ID
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ deallocating while item [SharedReadWrite for <TAG>] is strongly protected by call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -0,0 +1,14 @@
//@error-pattern: /deallocation .* tag does not exist in the borrow stack/
use std::alloc::{alloc, dealloc, Layout};

fn main() {
unsafe {
let x = alloc(Layout::from_size_align_unchecked(1, 1));
let ptr1 = (&mut *x) as *mut u8;
let ptr2 = (&mut *ptr1) as *mut u8;
// Invalidate ptr2 by writing to ptr1.
ptr1.write(0);
// Deallocate through ptr2.
dealloc(ptr2, Layout::from_size_align_unchecked(1, 1));
}
}
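For contrast, a hedged sketch (not part of the test suite) of the variant that should be accepted: deallocating through `ptr1`, whose tag is still in the borrow stack after the write.

```rust
use std::alloc::{alloc, dealloc, Layout};

fn main() {
    unsafe {
        let layout = Layout::from_size_align_unchecked(1, 1);
        let x = alloc(layout);
        let ptr1 = (&mut *x) as *mut u8;
        let _ptr2 = (&mut *ptr1) as *mut u8; // gets invalidated below, but is never used
        ptr1.write(0);
        dealloc(ptr1, layout); // deallocate through the still-valid tag instead
    }
}
```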
|
@ -0,0 +1,30 @@
|
||||
error: Undefined Behavior: attempting deallocation using <TAG> at ALLOC, but that tag does not exist in the borrow stack for this location
|
||||
--> RUSTLIB/alloc/src/alloc.rs:LL:CC
|
||||
|
|
||||
LL | unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ attempting deallocation using <TAG> at ALLOC, but that tag does not exist in the borrow stack for this location
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
help: <TAG> was created by a SharedReadWrite retag at offsets [0x0..0x1]
|
||||
--> $DIR/illegal_deALLOC.rs:LL:CC
|
||||
|
|
||||
LL | let ptr2 = (&mut *ptr1) as *mut u8;
|
||||
| ^^^^^^^^^^^^
|
||||
help: <TAG> was later invalidated at offsets [0x0..0x1] by a write access
|
||||
--> $DIR/illegal_deALLOC.rs:LL:CC
|
||||
|
|
||||
LL | ptr1.write(0);
|
||||
| ^^^^^^^^^^^^^
|
||||
= note: BACKTRACE:
|
||||
= note: inside `std::alloc::dealloc` at RUSTLIB/alloc/src/alloc.rs:LL:CC
|
||||
note: inside `main` at $DIR/illegal_deALLOC.rs:LL:CC
|
||||
--> $DIR/illegal_deALLOC.rs:LL:CC
|
||||
|
|
||||
LL | dealloc(ptr2, Layout::from_size_align_unchecked(1, 1));
|
||||
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
|
||||
|
||||
error: aborting due to previous error
|
||||
|
@ -7,6 +7,6 @@ fn main() {
|
||||
fn foo(a: &mut u32, y: *mut u32) -> u32 {
|
||||
*a = 1;
|
||||
let _b = &*a;
|
||||
unsafe { *y = 2 }; //~ ERROR: /not granting access .* because that would remove .* which is protected/
|
||||
unsafe { *y = 2 }; //~ ERROR: /not granting access .* because that would remove .* which is strongly protected/
|
||||
return *a;
|
||||
}
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/illegal_write6.rs:LL:CC
|
||||
|
|
||||
LL | unsafe { *y = 2 };
|
||||
| ^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/invalidate_against_protector1.rs:LL:CC
|
||||
|
|
||||
LL | let _val = unsafe { *x };
|
||||
| ^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/invalidate_against_protector2.rs:LL:CC
|
||||
|
|
||||
LL | unsafe { *x = 0 };
|
||||
| ^^^^^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^^^^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@ -1,8 +1,8 @@
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
error: Undefined Behavior: not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
--> $DIR/invalidate_against_protector3.rs:LL:CC
|
||||
|
|
||||
LL | unsafe { *x = 0 };
|
||||
| ^^^^^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is protected because it is an argument of call ID
|
||||
| ^^^^^^ not granting access to tag <TAG> because that would remove [SharedReadOnly for <TAG>] which is strongly protected because it is an argument of call ID
|
||||
|
|
||||
= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
|
||||
= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
|
||||
|
@@ -1,4 +1,4 @@
-//@error-pattern: which is protected
+//@error-pattern: which is strongly protected
 struct Newtype<'a>(&'a mut i32, i32);
 
 fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
+error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
 --> RUSTLIB/alloc/src/boxed.rs:LL:CC
 |
 LL | Box(unsafe { Unique::new_unchecked(raw) }, alloc)
-| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
+| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
 |
 = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
 = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
@@ -1,4 +1,4 @@
-//@error-pattern: which is protected
+//@error-pattern: which is strongly protected
 struct Newtype<'a>(&'a mut i32);
 
 fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
@@ -1,8 +1,8 @@
-error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
+error: Undefined Behavior: not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
 --> RUSTLIB/alloc/src/boxed.rs:LL:CC
 |
 LL | Box(unsafe { Unique::new_unchecked(raw) }, alloc)
-| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is protected because it is an argument of call ID
+| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ not granting access to tag <TAG> because that would remove [Unique for <TAG>] which is strongly protected because it is an argument of call ID
 |
 = help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
 = help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
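These `//@error-pattern` hunks show only the top of the affected tests. As a minimal sketch of the shape such a test takes (the `main` body below is illustrative, assumed rather than copied from this diff): the mutable reference stored in `Newtype` is protected while `dealloc_while_running` executes, and freeing the memory it points to during the call, here via `Box::from_raw`, is where the `boxed.rs` diagnostics above come from.

```rust
struct Newtype<'a>(&'a mut i32);

fn dealloc_while_running(_n: Newtype<'_>, dealloc: impl FnOnce()) {
    // `_n.0` is protected for the duration of this call.
    dealloc();
}

fn main() {
    let ptr = Box::into_raw(Box::new(0i32));
    // Reconstructing and dropping the Box frees the allocation that the
    // protected reference still points to -- Undefined Behavior under
    // Stacked Borrows, matching the "which is strongly protected" pattern.
    dealloc_while_running(Newtype(unsafe { &mut *ptr }), || unsafe {
        drop(Box::from_raw(ptr));
    });
}
```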
@@ -0,0 +1,17 @@
+//! Reborrowing a `&mut !Unpin` must still act like a (fake) read.
+use std::marker::PhantomPinned;
+
+struct NotUnpin(i32, PhantomPinned);
+
+fn main() {
+    unsafe {
+        let mut x = NotUnpin(0, PhantomPinned);
+        // Mutable borrow of `Unpin` field (with lifetime laundering)
+        let fieldref = &mut *(&mut x.0 as *mut i32);
+        // Mutable reborrow of the entire `x`, which is `!Unpin` but should
+        // still count as a read since we would add `dereferenceable`.
+        let _xref = &mut x;
+        // That read should have invalidated `fieldref`.
+        *fieldref = 0; //~ ERROR: /write access .* tag does not exist in the borrow stack/
+    }
+}
@@ -0,0 +1,28 @@
+error: Undefined Behavior: attempting a write access using <TAG> at ALLOC[0x0], but that tag does not exist in the borrow stack for this location
+--> $DIR/notunpin_dereferenceable_fakeread.rs:LL:CC
+|
+LL | *fieldref = 0;
+| ^^^^^^^^^^^^^
+| |
+| attempting a write access using <TAG> at ALLOC[0x0], but that tag does not exist in the borrow stack for this location
+| this error occurs as part of an access at ALLOC[0x0..0x4]
+|
+= help: this indicates a potential bug in the program: it performed an invalid operation, but the Stacked Borrows rules it violated are still experimental
+= help: see https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/stacked-borrows.md for further information
+help: <TAG> was created by a Unique retag at offsets [0x0..0x4]
+--> $DIR/notunpin_dereferenceable_fakeread.rs:LL:CC
+|
+LL | let fieldref = &mut *(&mut x.0 as *mut i32);
+| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+help: <TAG> was later invalidated at offsets [0x0..0x4] by a SharedReadWrite retag
+--> $DIR/notunpin_dereferenceable_fakeread.rs:LL:CC
+|
+LL | let _xref = &mut x;
+| ^^^^^^
+= note: BACKTRACE:
+= note: inside `main` at $DIR/notunpin_dereferenceable_fakeread.rs:LL:CC
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to previous error
+
@@ -0,0 +1,31 @@
+//! Make sure that a retag acts like a read for the data race model.
+//@compile-flags: -Zmiri-preemption-rate=0
+#[derive(Copy, Clone)]
+struct SendPtr(*mut u8);
+
+unsafe impl Send for SendPtr {}
+
+fn thread_1(p: SendPtr) {
+    let p = p.0;
+    unsafe {
+        let _r = &*p;
+    }
+}
+
+fn thread_2(p: SendPtr) {
+    let p = p.0;
+    unsafe {
+        *p = 5; //~ ERROR: Data race detected between Write on thread `<unnamed>` and Read on thread `<unnamed>`
+    }
+}
+
+fn main() {
+    let mut x = 0;
+    let p = std::ptr::addr_of_mut!(x);
+    let p = SendPtr(p);
+
+    let t1 = std::thread::spawn(move || thread_1(p));
+    let t2 = std::thread::spawn(move || thread_2(p));
+    let _ = t1.join();
+    let _ = t2.join();
+}
@@ -0,0 +1,20 @@
+error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
+--> $DIR/retag_data_race_read.rs:LL:CC
+|
+LL | *p = 5;
+| ^^^^^^ Data race detected between Write on thread `<unnamed>` and Read on thread `<unnamed>` at ALLOC
+|
+= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+= note: BACKTRACE:
+= note: inside `thread_2` at $DIR/retag_data_race_read.rs:LL:CC
+note: inside closure at $DIR/retag_data_race_read.rs:LL:CC
+--> $DIR/retag_data_race_read.rs:LL:CC
+|
+LL | let t2 = std::thread::spawn(move || thread_2(p));
+| ^^^^^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to previous error
+
@@ -0,0 +1,31 @@
+//! Make sure that a retag acts like a write for the data race model.
+//@compile-flags: -Zmiri-preemption-rate=0
+#[derive(Copy, Clone)]
+struct SendPtr(*mut u8);
+
+unsafe impl Send for SendPtr {}
+
+fn thread_1(p: SendPtr) {
+    let p = p.0;
+    unsafe {
+        let _r = &mut *p;
+    }
+}
+
+fn thread_2(p: SendPtr) {
+    let p = p.0;
+    unsafe {
+        *p = 5; //~ ERROR: Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>`
+    }
+}
+
+fn main() {
+    let mut x = 0;
+    let p = std::ptr::addr_of_mut!(x);
+    let p = SendPtr(p);
+
+    let t1 = std::thread::spawn(move || thread_1(p));
+    let t2 = std::thread::spawn(move || thread_2(p));
+    let _ = t1.join();
+    let _ = t2.join();
+}
@@ -0,0 +1,20 @@
+error: Undefined Behavior: Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
+--> $DIR/retag_data_race_write.rs:LL:CC
+|
+LL | *p = 5;
+| ^^^^^^ Data race detected between Write on thread `<unnamed>` and Write on thread `<unnamed>` at ALLOC
+|
+= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
+= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
+= note: BACKTRACE:
+= note: inside `thread_2` at $DIR/retag_data_race_write.rs:LL:CC
+note: inside closure at $DIR/retag_data_race_write.rs:LL:CC
+--> $DIR/retag_data_race_write.rs:LL:CC
+|
+LL | let t2 = std::thread::spawn(move || thread_2(p));
+| ^^^^^^^^^^^
+
+note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
+
+error: aborting due to previous error
+
src/tools/miri/tests/pass-dep/getrandom_1.rs (new file)
@@ -0,0 +1,8 @@
+// mac-os `getrandom_1` does some pointer shenanigans
+//@compile-flags: -Zmiri-permissive-provenance
+
+/// Test old version of `getrandom`.
+fn main() {
+    let mut data = vec![0; 16];
+    getrandom_1::getrandom(&mut data).unwrap();
+}
@@ -1,12 +1,10 @@
-// mac-os `getrandom_1` does some pointer shenanigans
-//@compile-flags: -Zmiri-permissive-provenance
+//@compile-flags: -Zmiri-strict-provenance
 use rand::{rngs::SmallRng, Rng, SeedableRng};
 
 fn main() {
-    // Test `getrandom` directly (in multiple different versions).
+    // Test `getrandom` directly.
     let mut data = vec![0; 16];
-    getrandom_1::getrandom(&mut data).unwrap();
     getrandom_2::getrandom(&mut data).unwrap();
     getrandom::getrandom(&mut data).unwrap();
 
     // Try seeding with "real" entropy.
     let mut rng = SmallRng::from_entropy();
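The remaining `getrandom.rs` test still exercises several major versions of the crate side by side, while the version-0.1 call moves to its own file so the main test can run under strict provenance. How several versions coexist is not shown in this diff; a hypothetical manifest sketch, assuming Cargo `package` renaming in the test dependencies (version numbers are illustrative, not taken from this commit), would look like this:

```toml
# Hypothetical sketch, not part of this diff: distinct dependency names
# mapped onto different major versions of the `getrandom` package.
[dependencies]
getrandom_1 = { package = "getrandom", version = "0.1" }
getrandom_2 = { package = "getrandom", version = "0.2" }
getrandom = "0.2"
```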
@@ -1,9 +1,5 @@
 #![feature(lang_items, start)]
 #![no_std]
-// windows tls dtors go through libstd right now, thus this test
-// cannot pass. When windows tls dtors go through the special magic
-// windows linker section, we can run this test on windows again.
 //@ignore-target-windows: no-std not supported on Windows
 
 // Plumbing to let us use `writeln!` to host stdout:
 