Mirror of https://github.com/rust-lang/rust.git (synced 2024-11-25 08:13:41 +00:00)

Auto merge of #131727 - RalfJung:miri-sync, r=RalfJung

Miri subtree update
r? `@ghost`

Commit 00367d523e

Cargo.lock (10 lines changed)
@@ -377,7 +377,7 @@ dependencies = [
"cargo_metadata",
"directories",
"rustc-build-sysroot",
"rustc_tools_util",
"rustc_tools_util 0.4.0",
"rustc_version",
"serde",
"serde_json",
@@ -552,7 +552,7 @@ dependencies = [
"parking_lot",
"quote",
"regex",
"rustc_tools_util",
"rustc_tools_util 0.3.0",
"serde",
"serde_json",
"syn 2.0.79",
@@ -4465,6 +4465,12 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ba09476327c4b70ccefb6180f046ef588c26a24cf5d269a9feba316eb4f029f"

[[package]]
name = "rustc_tools_util"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3316159ab19e19d1065ecc49278e87f767a9dae9fae80348d2b4d4fa4ae02d4d"

[[package]]
name = "rustc_trait_selection"
version = "0.0.0"

@@ -45,6 +45,14 @@ process for such contributions:
This process is largely informal, and its primary goal is to more clearly communicate expectations.
Please get in touch with us if you have any questions!

### Managing the review state

Most PRs bounce back and forth between the reviewer and the author several times, so it is good to
keep track of who is expected to take the next step. We are using the `S-waiting-for-review` and
`S-waiting-for-author` labels for that. If a reviewer asked you to do some changes and you think
they are all taken care of, post a comment saying `@rustbot ready` to mark a PR as ready for the
next round of review.

## Preparing the build environment

Miri heavily relies on internal and unstable rustc interfaces to execute MIR,
@@ -195,48 +203,37 @@ installed (`cargo install hyperfine`).

## Configuring `rust-analyzer`

To configure `rust-analyzer` and VS Code for working on Miri, save the following
to `.vscode/settings.json` in your local Miri clone:
To configure `rust-analyzer` and the IDE for working on Miri, copy one of the provided
configuration files according to the instructions below. You can also set up a symbolic
link to keep the configuration in sync with our recommendations.

```json
{
"rust-analyzer.rustc.source": "discover",
"rust-analyzer.linkedProjects": [
"Cargo.toml",
"cargo-miri/Cargo.toml",
"miri-script/Cargo.toml",
],
"rust-analyzer.check.invocationLocation": "root",
"rust-analyzer.check.invocationStrategy": "once",
"rust-analyzer.check.overrideCommand": [
"env",
"MIRI_AUTO_OPS=no",
"./miri",
"clippy", // make this `check` when working with a locally built rustc
"--message-format=json",
],
// Contrary to what the name suggests, this also affects proc macros.
"rust-analyzer.cargo.buildScripts.invocationLocation": "root",
"rust-analyzer.cargo.buildScripts.invocationStrategy": "once",
"rust-analyzer.cargo.buildScripts.overrideCommand": [
"env",
"MIRI_AUTO_OPS=no",
"./miri",
"check",
"--message-format=json",
],
}
```
### Visual Studio Code

> #### Note
>
> If you are [building Miri with a locally built rustc][], set
> `rust-analyzer.rustcSource` to the relative path from your Miri clone to the
> root `Cargo.toml` of the locally built rustc. For example, the path might look
> like `../rust/Cargo.toml`.
Copy [`etc/rust_analyzer_vscode.json`] to `.vscode/settings.json` in the project root directory.

[`etc/rust_analyzer_vscode.json`]: https://github.com/rust-lang/miri/blob/master/etc/rust_analyzer_vscode.json

### Helix

Copy [`etc/rust_analyzer_helix.toml`] to `.helix/languages.toml` in the project root directory.

Since working on Miri requires a custom toolchain, and Helix requires the language server
to be installed with the toolchain, you have to run `./miri toolchain -c rust-analyzer`
when installing the Miri toolchain. Alternatively, set the `RUSTUP_TOOLCHAIN` environment variable according to
[the documentation](https://rust-analyzer.github.io/manual.html#toolchain).

[`etc/rust_analyzer_helix.toml`]: https://github.com/rust-lang/miri/blob/master/etc/rust_analyzer_helix.toml

### Advanced configuration

If you are building Miri with a locally built rustc, set
`rust-analyzer.rustcSource` to the relative path from your Miri clone to the
root `Cargo.toml` of the locally built rustc. For example, the path might look
like `../rust/Cargo.toml`. In addition to that, replace `clippy` by `check`
in the `rust-analyzer.check.overrideCommand` setting.

See the rustc-dev-guide's docs on ["Configuring `rust-analyzer` for `rustc`"][rdg-r-a]
for more information about configuring VS Code and `rust-analyzer`.
for more information about configuring the IDE and `rust-analyzer`.

[rdg-r-a]: https://rustc-dev-guide.rust-lang.org/building/suggested.html#configuring-rust-analyzer-for-rustc

@@ -290,7 +290,7 @@ environment variable. We first document the most relevant and most commonly used
* `-Zmiri-compare-exchange-weak-failure-rate=<rate>` changes the failure rate of
  `compare_exchange_weak` operations. The default is `0.8` (so 4 out of 5 weak ops will fail).
  You can change it to any value between `0.0` and `1.0`, where `1.0` means it
  will always fail and `0.0` means it will never fail. Note than setting it to
  will always fail and `0.0` means it will never fail. Note that setting it to
  `1.0` will likely cause hangs, since it means programs using
  `compare_exchange_weak` cannot make progress.
* `-Zmiri-disable-isolation` disables host isolation. As a consequence,
@@ -392,11 +392,6 @@ to Miri failing to detect cases of undefined behavior in a program.
  but reports to the program that it did actually write. This is useful when you
  are not interested in the actual program's output, but only want to see Miri's
  errors and warnings.
* `-Zmiri-panic-on-unsupported` will makes some forms of unsupported functionality,
  such as FFI and unsupported syscalls, panic within the context of the emulated
  application instead of raising an error within the context of Miri (and halting
  execution). Note that code might not expect these operations to ever panic, so
  this flag can lead to strange (mis)behavior.
* `-Zmiri-recursive-validation` is a *highly experimental* flag that makes validity checking
  recurse below references.
* `-Zmiri-retag-fields[=<all|none|scalar>]` controls when Stacked Borrows retagging recurses into

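The `-Zmiri-compare-exchange-weak-failure-rate` flag above only tunes how often
`compare_exchange_weak` fails *spuriously*; a correctly written retry loop keeps working at any
rate below `1.0`, it just takes more iterations. A minimal sketch of such a loop (illustrative
only, not part of this diff; run it under Miri with e.g.
`MIRIFLAGS=-Zmiri-compare-exchange-weak-failure-rate=0.99 cargo miri run`):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let counter = AtomicU32::new(0);
    let mut current = counter.load(Ordering::Relaxed);
    // Spurious failures (whose probability the flag controls) only cause extra iterations here.
    loop {
        match counter.compare_exchange_weak(
            current,
            current + 1,
            Ordering::AcqRel,
            Ordering::Relaxed,
        ) {
            Ok(_) => break,
            Err(actual) => current = actual,
        }
    }
    assert_eq!(counter.load(Ordering::Relaxed), 1);
    // With a failure rate of exactly 1.0 this loop could never succeed, which is why the
    // documentation above warns that such a setting will likely hang the program.
}
```
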
@@ -1,6 +1,6 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
version = 4

[[package]]
name = "anyhow"
@@ -202,9 +202,9 @@ dependencies = [

[[package]]
name = "rustc_tools_util"
version = "0.3.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ba09476327c4b70ccefb6180f046ef588c26a24cf5d269a9feba316eb4f029f"
checksum = "3316159ab19e19d1065ecc49278e87f767a9dae9fae80348d2b4d4fa4ae02d4d"

[[package]]
name = "rustc_version"

@@ -26,4 +26,4 @@ rustc-build-sysroot = "0.5.4"
serde = { version = "1.0.185", features = ["derive"] }

[build-dependencies]
rustc_tools_util = "0.3"
rustc_tools_util = "0.4"

@@ -150,10 +150,10 @@ case $HOST_TARGET in
# Partially supported targets (tier 2)
BASIC="empty_main integer heap_alloc libc-mem vec string btreemap" # ensures we have the basics: pre-main code, system allocator
UNIX="hello panic/panic panic/unwind concurrency/simple atomic libc-mem libc-misc libc-random env num_cpus" # the things that are very similar across all Unixes, and hence easily supported there
TEST_TARGET=x86_64-unknown-freebsd run_tests_minimal $BASIC $UNIX time hashmap random threadname pthread fs
TEST_TARGET=i686-unknown-freebsd run_tests_minimal $BASIC $UNIX time hashmap random threadname pthread fs
TEST_TARGET=x86_64-unknown-illumos run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls
TEST_TARGET=x86_64-pc-solaris run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls
TEST_TARGET=x86_64-unknown-freebsd run_tests_minimal $BASIC $UNIX time hashmap random threadname pthread fs libc-pipe
TEST_TARGET=i686-unknown-freebsd run_tests_minimal $BASIC $UNIX time hashmap random threadname pthread fs libc-pipe
TEST_TARGET=x86_64-unknown-illumos run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls libc-pipe
TEST_TARGET=x86_64-pc-solaris run_tests_minimal $BASIC $UNIX time hashmap random thread sync available-parallelism tls libc-pipe
TEST_TARGET=aarch64-linux-android run_tests_minimal $BASIC $UNIX time hashmap pthread --skip threadname
TEST_TARGET=wasm32-wasip2 run_tests_minimal $BASIC wasm
TEST_TARGET=wasm32-unknown-unknown run_tests_minimal no_std empty_main wasm # this target doesn't really have std

src/tools/miri/etc/rust_analyzer_helix.toml (new file, 32 lines)
@@ -0,0 +1,32 @@
[language-server.rust-analyzer.config.rustc]
source = "discover"

[language-server.rust-analyzer.config]
linkedProjects = [
"Cargo.toml",
"cargo-miri/Cargo.toml",
"miri-script/Cargo.toml",
]

[language-server.rust-analyzer.config.check]
invocationLocation = "root"
invocationStrategy = "once"
overrideCommand = [
"env",
"MIRI_AUTO_OPS=no",
"./miri",
"clippy", # make this `check` when working with a locally built rustc
"--message-format=json",
]

# Contrary to what the name suggests, this also affects proc macros.
[language-server.rust-analyzer.config.buildScripts]
invocationLocation = "root"
invocationStrategy = "once"
overrideCommand = [
"env",
"MIRI_AUTO_OPS=no",
"./miri",
"check",
"--message-format=json",
]

src/tools/miri/etc/rust_analyzer_vscode.json (new file, 27 lines)
@@ -0,0 +1,27 @@
{
"rust-analyzer.rustc.source": "discover",
"rust-analyzer.linkedProjects": [
"Cargo.toml",
"cargo-miri/Cargo.toml",
"miri-script/Cargo.toml",
],
"rust-analyzer.check.invocationLocation": "root",
"rust-analyzer.check.invocationStrategy": "once",
"rust-analyzer.check.overrideCommand": [
"env",
"MIRI_AUTO_OPS=no",
"./miri",
"clippy", // make this `check` when working with a locally built rustc
"--message-format=json",
],
// Contrary to what the name suggests, this also affects proc macros.
"rust-analyzer.cargo.buildScripts.invocationLocation": "root",
"rust-analyzer.cargo.buildScripts.invocationStrategy": "once",
"rust-analyzer.cargo.buildScripts.overrideCommand": [
"env",
"MIRI_AUTO_OPS=no",
"./miri",
"check",
"--message-format=json",
],
}

@@ -1 +1 @@
7067e4aee45c18cfa1c6af3bf79bd097684fb294
17a19e684cdf3ca088af8b4da6a6209d128913f4

@@ -453,7 +453,7 @@ impl<'tcx> MiriMachine<'tcx> {
let thread = self.threads.active_thread();
global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
if let Some(data_race) = &self.data_race {
data_race.release_clock(&self.threads).clone()
data_race.release_clock(&self.threads, |clock| clock.clone())
} else {
VClock::default()
}

@@ -530,8 +530,6 @@ fn main() {
} else if arg == "-Zmiri-ignore-leaks" {
miri_config.ignore_leaks = true;
miri_config.collect_leak_backtraces = false;
} else if arg == "-Zmiri-panic-on-unsupported" {
miri_config.panic_on_unsupported = true;
} else if arg == "-Zmiri-strict-provenance" {
miri_config.provenance_mode = ProvenanceMode::Strict;
} else if arg == "-Zmiri-permissive-provenance" {

@@ -828,15 +828,14 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
}
}

/// Returns the `release` clock of the current thread.
/// Calls the callback with the "release" clock of the current thread.
/// Other threads can acquire this clock in the future to establish synchronization
/// with this program point.
fn release_clock<'a>(&'a self) -> Option<Ref<'a, VClock>>
where
'tcx: 'a,
{
///
/// The closure will only be invoked if data race handling is on.
fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R> {
let this = self.eval_context_ref();
Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads))
Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads, callback))
}

/// Acquire the given clock into the current thread, establishing synchronization with
@@ -1728,7 +1727,7 @@ impl GlobalState {
let current_index = self.active_thread_index(thread_mgr);

// Store the terminaion clock.
let terminaion_clock = self.release_clock(thread_mgr).clone();
let terminaion_clock = self.release_clock(thread_mgr, |clock| clock.clone());
self.thread_info.get_mut()[current_thread].termination_vector_clock =
Some(terminaion_clock);

@@ -1778,21 +1777,23 @@ impl GlobalState {
clocks.clock.join(clock);
}

/// Returns the `release` clock of the current thread.
/// Calls the given closure with the "release" clock of the current thread.
/// Other threads can acquire this clock in the future to establish synchronization
/// with this program point.
pub fn release_clock<'tcx>(&self, threads: &ThreadManager<'tcx>) -> Ref<'_, VClock> {
pub fn release_clock<'tcx, R>(
&self,
threads: &ThreadManager<'tcx>,
callback: impl FnOnce(&VClock) -> R,
) -> R {
let thread = threads.active_thread();
let span = threads.active_thread_ref().current_span();
// We increment the clock each time this happens, to ensure no two releases
// can be confused with each other.
let (index, mut clocks) = self.thread_state_mut(thread);
let r = callback(&clocks.clock);
// Increment the clock, so that all following events cannot be confused with anything that
// occurred before the release. Crucially, the callback is invoked on the *old* clock!
clocks.increment_clock(index, span);
drop(clocks);
// To return a read-only view, we need to release the RefCell
// and borrow it again.
let (_index, clocks) = self.thread_state(thread);
Ref::map(clocks, |c| &c.clock)

r
}

fn thread_index(&self, thread: ThreadId) -> VectorIdx {

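The change running through the hunks above and below replaces an API that returned a
`Ref<'_, VClock>` borrow with one that passes the clock to a caller-supplied closure. A minimal,
self-contained sketch of that design choice (simplified stand-in types, not Miri's actual ones):
the `RefCell` borrow is now scoped to the closure, so callers cannot accidentally hold the
returned `Ref` across other accesses to the same state, and the implementation can bump the clock
immediately after the closure has seen the *old* value.

```rust
use std::cell::RefCell;

// Stand-in for Miri's vector clock; the real type lives in concurrency/vector_clock.rs.
#[derive(Clone, Default, Debug)]
struct VClock(Vec<u64>);

struct ThreadClocks {
    clock: RefCell<VClock>,
}

impl ThreadClocks {
    // Callback style: the borrow lives only as long as `callback` runs, and we can
    // "increment" the clock right afterwards, on the same borrow.
    fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> R {
        let mut clock = self.clock.borrow_mut();
        let r = callback(&*clock); // the closure observes the clock *before* the increment
        clock.0.push(0); // placeholder for the real `increment_clock(index, span)`
        r
    }
}

fn main() {
    let t = ThreadClocks { clock: RefCell::new(VClock::default()) };
    // Callers that need a snapshot clone inside the closure, mirroring
    // `release_clock(&this.threads, |clock| clock.clone())` in the diff.
    let snapshot = t.release_clock(|clock| clock.clone());
    println!("released clock snapshot: {snapshot:?}");
}
```
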
@ -2,7 +2,7 @@ use std::collections::VecDeque;
|
||||
|
||||
use rustc_index::Idx;
|
||||
|
||||
use super::sync::EvalContextExtPriv as _;
|
||||
use super::thread::DynUnblockCallback;
|
||||
use super::vector_clock::VClock;
|
||||
use crate::*;
|
||||
|
||||
@ -27,22 +27,6 @@ pub(super) struct InitOnce {
|
||||
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn init_once_get_or_create_id(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
) -> InterpResult<'tcx, InitOnceId> {
|
||||
let this = self.eval_context_mut();
|
||||
this.get_or_create_id(
|
||||
lock,
|
||||
offset,
|
||||
|ecx| &mut ecx.machine.sync.init_onces,
|
||||
|_| interp_ok(Default::default()),
|
||||
)?
|
||||
.ok_or_else(|| err_ub_format!("init_once has invalid ID"))
|
||||
.into()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn init_once_status(&mut self, id: InitOnceId) -> InitOnceStatus {
|
||||
let this = self.eval_context_ref();
|
||||
@ -51,11 +35,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
/// Put the thread into the queue waiting for the initialization.
|
||||
#[inline]
|
||||
fn init_once_enqueue_and_block(
|
||||
&mut self,
|
||||
id: InitOnceId,
|
||||
callback: impl UnblockCallback<'tcx> + 'tcx,
|
||||
) {
|
||||
fn init_once_enqueue_and_block(&mut self, id: InitOnceId, callback: DynUnblockCallback<'tcx>) {
|
||||
let this = self.eval_context_mut();
|
||||
let thread = this.active_thread();
|
||||
let init_once = &mut this.machine.sync.init_onces[id];
|
||||
@ -93,7 +73,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Each complete happens-before the end of the wait
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
init_once.clock.clone_from(&data_race.release_clock(&this.machine.threads));
|
||||
data_race
|
||||
.release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
|
||||
}
|
||||
|
||||
// Wake up everyone.
|
||||
@ -119,7 +100,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Each complete happens-before the end of the wait
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
init_once.clock.clone_from(&data_race.release_clock(&this.machine.threads));
|
||||
data_race
|
||||
.release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
|
||||
}
|
||||
|
||||
// Wake up one waiting thread, so they can go ahead and try to init this.
|
||||
|
@ -1,4 +1,3 @@
|
||||
use std::any::Any;
|
||||
use std::collections::VecDeque;
|
||||
use std::collections::hash_map::Entry;
|
||||
use std::ops::Not;
|
||||
@ -12,11 +11,6 @@ use super::init_once::InitOnce;
|
||||
use super::vector_clock::VClock;
|
||||
use crate::*;
|
||||
|
||||
pub trait SyncId {
|
||||
fn from_u32(id: u32) -> Self;
|
||||
fn to_u32(&self) -> u32;
|
||||
}
|
||||
|
||||
/// We cannot use the `newtype_index!` macro because we have to use 0 as a
|
||||
/// sentinel value meaning that the identifier is not assigned. This is because
|
||||
/// the pthreads static initializers initialize memory with zeros (see the
|
||||
@ -28,16 +22,6 @@ macro_rules! declare_id {
|
||||
#[derive(Clone, Copy, Debug, PartialOrd, Ord, PartialEq, Eq, Hash)]
|
||||
pub struct $name(std::num::NonZero<u32>);
|
||||
|
||||
impl $crate::concurrency::sync::SyncId for $name {
|
||||
// Panics if `id == 0`.
|
||||
fn from_u32(id: u32) -> Self {
|
||||
Self(std::num::NonZero::new(id).unwrap())
|
||||
}
|
||||
fn to_u32(&self) -> u32 {
|
||||
self.0.get()
|
||||
}
|
||||
}
|
||||
|
||||
impl $crate::VisitProvenance for $name {
|
||||
fn visit_provenance(&self, _visit: &mut VisitWith<'_>) {}
|
||||
}
|
||||
@ -56,12 +40,6 @@ macro_rules! declare_id {
|
||||
usize::try_from(self.0.get() - 1).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl $name {
|
||||
pub fn to_u32_scalar(&self) -> Scalar {
|
||||
Scalar::from_u32(self.0.get())
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
pub(super) use declare_id;
|
||||
@ -79,9 +57,6 @@ struct Mutex {
|
||||
queue: VecDeque<ThreadId>,
|
||||
/// Mutex clock. This tracks the moment of the last unlock.
|
||||
clock: VClock,
|
||||
|
||||
/// Additional data that can be set by shim implementations.
|
||||
data: Option<Box<dyn Any>>,
|
||||
}
|
||||
|
||||
declare_id!(RwLockId);
|
||||
@ -118,9 +93,6 @@ struct RwLock {
|
||||
/// locks.
|
||||
/// This is only relevant when there is an active reader.
|
||||
clock_current_readers: VClock,
|
||||
|
||||
/// Additional data that can be set by shim implementations.
|
||||
data: Option<Box<dyn Any>>,
|
||||
}
|
||||
|
||||
declare_id!(CondvarId);
|
||||
@ -135,9 +107,6 @@ struct Condvar {
|
||||
/// Contains the clock of the last thread to
|
||||
/// perform a condvar-signal.
|
||||
clock: VClock,
|
||||
|
||||
/// Additional data that can be set by shim implementations.
|
||||
data: Option<Box<dyn Any>>,
|
||||
}
|
||||
|
||||
/// The futex state.
|
||||
@ -167,89 +136,15 @@ pub struct SynchronizationObjects {
|
||||
mutexes: IndexVec<MutexId, Mutex>,
|
||||
rwlocks: IndexVec<RwLockId, RwLock>,
|
||||
condvars: IndexVec<CondvarId, Condvar>,
|
||||
futexes: FxHashMap<u64, Futex>,
|
||||
pub(super) init_onces: IndexVec<InitOnceId, InitOnce>,
|
||||
|
||||
/// Futex info for the futex at the given address.
|
||||
futexes: FxHashMap<u64, Futex>,
|
||||
}
|
||||
|
||||
// Private extension trait for local helper methods
|
||||
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
/// Lazily initialize the ID of this Miri sync structure.
|
||||
/// If memory stores '0', that indicates uninit and we generate a new instance.
|
||||
/// Returns `None` if memory stores a non-zero invalid ID.
|
||||
///
|
||||
/// `get_objs` must return the `IndexVec` that stores all the objects of this type.
|
||||
/// `create_obj` must create the new object if initialization is needed.
|
||||
#[inline]
|
||||
fn get_or_create_id<Id: SyncId + Idx, T>(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
|
||||
create_obj: impl for<'a> FnOnce(&'a mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
|
||||
) -> InterpResult<'tcx, Option<Id>> {
|
||||
let this = self.eval_context_mut();
|
||||
let offset = Size::from_bytes(offset);
|
||||
assert!(lock.layout.size >= offset + this.machine.layouts.u32.size);
|
||||
let id_place = lock.offset(offset, this.machine.layouts.u32, this)?;
|
||||
let next_index = get_objs(this).next_index();
|
||||
|
||||
// Since we are lazy, this update has to be atomic.
|
||||
let (old, success) = this
|
||||
.atomic_compare_exchange_scalar(
|
||||
&id_place,
|
||||
&ImmTy::from_uint(0u32, this.machine.layouts.u32),
|
||||
Scalar::from_u32(next_index.to_u32()),
|
||||
AtomicRwOrd::Relaxed, // deliberately *no* synchronization
|
||||
AtomicReadOrd::Relaxed,
|
||||
false,
|
||||
)?
|
||||
.to_scalar_pair();
|
||||
|
||||
interp_ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
|
||||
// We set the in-memory ID to `next_index`, now also create this object in the machine
|
||||
// state.
|
||||
let obj = create_obj(this)?;
|
||||
let new_index = get_objs(this).push(obj);
|
||||
assert_eq!(next_index, new_index);
|
||||
Some(new_index)
|
||||
} else {
|
||||
let id = Id::from_u32(old.to_u32().expect("layout is u32"));
|
||||
if get_objs(this).get(id).is_none() {
|
||||
// The in-memory ID is invalid.
|
||||
None
|
||||
} else {
|
||||
Some(id)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Eagerly creates a Miri sync structure.
|
||||
///
|
||||
/// `create_id` will store the index of the sync_structure in the memory pointed to by
|
||||
/// `lock_op`, so that future calls to `get_or_create_id` will see it as initialized.
|
||||
/// - `lock_op` must hold a pointer to the sync structure.
|
||||
/// - `lock_layout` must be the memory layout of the sync structure.
|
||||
/// - `offset` must be the offset inside the sync structure where its miri id will be stored.
|
||||
/// - `get_objs` is described in `get_or_create_id`.
|
||||
/// - `obj` must be the new sync object.
|
||||
fn create_id<Id: SyncId + Idx, T>(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
get_objs: impl for<'a> Fn(&'a mut MiriInterpCx<'tcx>) -> &'a mut IndexVec<Id, T>,
|
||||
obj: T,
|
||||
) -> InterpResult<'tcx, Id> {
|
||||
let this = self.eval_context_mut();
|
||||
let offset = Size::from_bytes(offset);
|
||||
assert!(lock.layout.size >= offset + this.machine.layouts.u32.size);
|
||||
let id_place = lock.offset(offset, this.machine.layouts.u32, this)?;
|
||||
|
||||
let new_index = get_objs(this).push(obj);
|
||||
this.write_scalar(Scalar::from_u32(new_index.to_u32()), &id_place)?;
|
||||
interp_ok(new_index)
|
||||
}
|
||||
|
||||
fn condvar_reacquire_mutex(
|
||||
&mut self,
|
||||
mutex: MutexId,
|
||||
@ -270,124 +165,135 @@ pub(super) trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
}
|
||||
}
|
||||
|
||||
impl SynchronizationObjects {
|
||||
pub fn mutex_create(&mut self) -> MutexId {
|
||||
self.mutexes.push(Default::default())
|
||||
}
|
||||
|
||||
pub fn rwlock_create(&mut self) -> RwLockId {
|
||||
self.rwlocks.push(Default::default())
|
||||
}
|
||||
|
||||
pub fn condvar_create(&mut self) -> CondvarId {
|
||||
self.condvars.push(Default::default())
|
||||
}
|
||||
|
||||
pub fn init_once_create(&mut self) -> InitOnceId {
|
||||
self.init_onces.push(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx> AllocExtra<'tcx> {
|
||||
pub fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
|
||||
self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
|
||||
}
|
||||
}
|
||||
|
||||
/// We designate an `init`` field in all primitives.
|
||||
/// If `init` is set to this, we consider the primitive initialized.
|
||||
pub const LAZY_INIT_COOKIE: u32 = 0xcafe_affe;
|
||||
|
||||
// Public interface to synchronization primitives. Please note that in most
|
||||
// cases, the function calls are infallible and it is the client's (shim
|
||||
// implementation's) responsibility to detect and deal with erroneous
|
||||
// situations.
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
/// Eagerly create and initialize a new mutex.
|
||||
fn mutex_create(
|
||||
/// Helper for lazily initialized `alloc_extra.sync` data:
|
||||
/// this forces an immediate init.
|
||||
fn lazy_sync_init<T: 'static + Copy>(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
data: Option<Box<dyn Any>>,
|
||||
) -> InterpResult<'tcx, MutexId> {
|
||||
primitive: &MPlaceTy<'tcx>,
|
||||
init_offset: Size,
|
||||
data: T,
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
this.create_id(lock, offset, |ecx| &mut ecx.machine.sync.mutexes, Mutex {
|
||||
data,
|
||||
..Default::default()
|
||||
})
|
||||
|
||||
let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
|
||||
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
|
||||
alloc_extra.sync.insert(offset, Box::new(data));
|
||||
// Mark this as "initialized".
|
||||
let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
|
||||
this.write_scalar_atomic(
|
||||
Scalar::from_u32(LAZY_INIT_COOKIE),
|
||||
&init_field,
|
||||
AtomicWriteOrd::Relaxed,
|
||||
)?;
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Lazily create a new mutex.
|
||||
/// `initialize_data` must return any additional data that a user wants to associate with the mutex.
|
||||
fn mutex_get_or_create_id(
|
||||
/// Helper for lazily initialized `alloc_extra.sync` data:
|
||||
/// Checks if the primitive is initialized:
|
||||
/// - If yes, fetches the data from `alloc_extra.sync`, or calls `missing_data` if that fails
|
||||
/// and stores that in `alloc_extra.sync`.
|
||||
/// - Otherwise, calls `new_data` to initialize the primitive.
|
||||
fn lazy_sync_get_data<T: 'static + Copy>(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
initialize_data: impl for<'a> FnOnce(
|
||||
&'a mut MiriInterpCx<'tcx>,
|
||||
) -> InterpResult<'tcx, Option<Box<dyn Any>>>,
|
||||
) -> InterpResult<'tcx, MutexId> {
|
||||
primitive: &MPlaceTy<'tcx>,
|
||||
init_offset: Size,
|
||||
missing_data: impl FnOnce() -> InterpResult<'tcx, T>,
|
||||
new_data: impl FnOnce(&mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, T>,
|
||||
) -> InterpResult<'tcx, T> {
|
||||
let this = self.eval_context_mut();
|
||||
this.get_or_create_id(
|
||||
lock,
|
||||
offset,
|
||||
|ecx| &mut ecx.machine.sync.mutexes,
|
||||
|ecx| initialize_data(ecx).map(|data| Mutex { data, ..Default::default() }),
|
||||
)?
|
||||
.ok_or_else(|| err_ub_format!("mutex has invalid ID"))
|
||||
.into()
|
||||
|
||||
// Check if this is already initialized. Needs to be atomic because we can race with another
|
||||
// thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
|
||||
// So we just try to replace MUTEX_INIT_COOKIE with itself.
|
||||
let init_cookie = Scalar::from_u32(LAZY_INIT_COOKIE);
|
||||
let init_field = primitive.offset(init_offset, this.machine.layouts.u32, this)?;
|
||||
let (_init, success) = this
|
||||
.atomic_compare_exchange_scalar(
|
||||
&init_field,
|
||||
&ImmTy::from_scalar(init_cookie, this.machine.layouts.u32),
|
||||
init_cookie,
|
||||
AtomicRwOrd::Relaxed,
|
||||
AtomicReadOrd::Relaxed,
|
||||
/* can_fail_spuriously */ false,
|
||||
)?
|
||||
.to_scalar_pair();
|
||||
|
||||
if success.to_bool()? {
|
||||
// If it is initialized, it must be found in the "sync primitive" table,
|
||||
// or else it has been moved illegally.
|
||||
let (alloc, offset, _) = this.ptr_get_alloc_id(primitive.ptr(), 0)?;
|
||||
let (alloc_extra, _machine) = this.get_alloc_extra_mut(alloc)?;
|
||||
if let Some(data) = alloc_extra.get_sync::<T>(offset) {
|
||||
interp_ok(*data)
|
||||
} else {
|
||||
let data = missing_data()?;
|
||||
alloc_extra.sync.insert(offset, Box::new(data));
|
||||
interp_ok(data)
|
||||
}
|
||||
} else {
|
||||
let data = new_data(this)?;
|
||||
this.lazy_sync_init(primitive, init_offset, data)?;
|
||||
interp_ok(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieve the additional data stored for a mutex.
|
||||
fn mutex_get_data<'a, T: 'static>(&'a mut self, id: MutexId) -> Option<&'a T>
|
||||
/// Get the synchronization primitive associated with the given pointer,
|
||||
/// or initialize a new one.
|
||||
fn get_sync_or_init<'a, T: 'static>(
|
||||
&'a mut self,
|
||||
ptr: Pointer,
|
||||
new: impl FnOnce(&'a mut MiriMachine<'tcx>) -> InterpResult<'tcx, T>,
|
||||
) -> InterpResult<'tcx, &'a T>
|
||||
where
|
||||
'tcx: 'a,
|
||||
{
|
||||
let this = self.eval_context_ref();
|
||||
this.machine.sync.mutexes[id].data.as_deref().and_then(|p| p.downcast_ref::<T>())
|
||||
}
|
||||
|
||||
fn rwlock_get_or_create_id(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
initialize_data: impl for<'a> FnOnce(
|
||||
&'a mut MiriInterpCx<'tcx>,
|
||||
) -> InterpResult<'tcx, Option<Box<dyn Any>>>,
|
||||
) -> InterpResult<'tcx, RwLockId> {
|
||||
let this = self.eval_context_mut();
|
||||
this.get_or_create_id(
|
||||
lock,
|
||||
offset,
|
||||
|ecx| &mut ecx.machine.sync.rwlocks,
|
||||
|ecx| initialize_data(ecx).map(|data| RwLock { data, ..Default::default() }),
|
||||
)?
|
||||
.ok_or_else(|| err_ub_format!("rwlock has invalid ID"))
|
||||
.into()
|
||||
}
|
||||
// Ensure there is memory behind this pointer, so that this allocation
|
||||
// is truly the only place where the data could be stored.
|
||||
this.check_ptr_access(ptr, Size::from_bytes(1), CheckInAllocMsg::InboundsTest)?;
|
||||
|
||||
/// Retrieve the additional data stored for a rwlock.
|
||||
fn rwlock_get_data<'a, T: 'static>(&'a mut self, id: RwLockId) -> Option<&'a T>
|
||||
where
|
||||
'tcx: 'a,
|
||||
{
|
||||
let this = self.eval_context_ref();
|
||||
this.machine.sync.rwlocks[id].data.as_deref().and_then(|p| p.downcast_ref::<T>())
|
||||
}
|
||||
|
||||
/// Eagerly create and initialize a new condvar.
|
||||
fn condvar_create(
|
||||
&mut self,
|
||||
condvar: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
data: Option<Box<dyn Any>>,
|
||||
) -> InterpResult<'tcx, CondvarId> {
|
||||
let this = self.eval_context_mut();
|
||||
this.create_id(condvar, offset, |ecx| &mut ecx.machine.sync.condvars, Condvar {
|
||||
data,
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
fn condvar_get_or_create_id(
|
||||
&mut self,
|
||||
lock: &MPlaceTy<'tcx>,
|
||||
offset: u64,
|
||||
initialize_data: impl for<'a> FnOnce(
|
||||
&'a mut MiriInterpCx<'tcx>,
|
||||
) -> InterpResult<'tcx, Option<Box<dyn Any>>>,
|
||||
) -> InterpResult<'tcx, CondvarId> {
|
||||
let this = self.eval_context_mut();
|
||||
this.get_or_create_id(
|
||||
lock,
|
||||
offset,
|
||||
|ecx| &mut ecx.machine.sync.condvars,
|
||||
|ecx| initialize_data(ecx).map(|data| Condvar { data, ..Default::default() }),
|
||||
)?
|
||||
.ok_or_else(|| err_ub_format!("condvar has invalid ID"))
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Retrieve the additional data stored for a condvar.
|
||||
fn condvar_get_data<'a, T: 'static>(&'a mut self, id: CondvarId) -> Option<&'a T>
|
||||
where
|
||||
'tcx: 'a,
|
||||
{
|
||||
let this = self.eval_context_ref();
|
||||
this.machine.sync.condvars[id].data.as_deref().and_then(|p| p.downcast_ref::<T>())
|
||||
let (alloc, offset, _) = this.ptr_get_alloc_id(ptr, 0)?;
|
||||
let (alloc_extra, machine) = this.get_alloc_extra_mut(alloc)?;
|
||||
// Due to borrow checker reasons, we have to do the lookup twice.
|
||||
if alloc_extra.get_sync::<T>(offset).is_none() {
|
||||
let new = new(machine)?;
|
||||
alloc_extra.sync.insert(offset, Box::new(new));
|
||||
}
|
||||
interp_ok(alloc_extra.get_sync::<T>(offset).unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@ -444,7 +350,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
// The mutex is completely unlocked. Try transferring ownership
|
||||
// to another thread.
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
mutex.clock.clone_from(&data_race.release_clock(&this.machine.threads));
|
||||
data_race.release_clock(&this.machine.threads, |clock| {
|
||||
mutex.clock.clone_from(clock)
|
||||
});
|
||||
}
|
||||
if let Some(thread) = this.machine.sync.mutexes[id].queue.pop_front() {
|
||||
this.unblock_thread(thread, BlockReason::Mutex(id))?;
|
||||
@ -553,7 +461,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
}
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
// Add this to the shared-release clock of all concurrent readers.
|
||||
rwlock.clock_current_readers.join(&data_race.release_clock(&this.machine.threads));
|
||||
data_race.release_clock(&this.machine.threads, |clock| {
|
||||
rwlock.clock_current_readers.join(clock)
|
||||
});
|
||||
}
|
||||
|
||||
// The thread was a reader. If the lock is not held any more, give it to a writer.
|
||||
@ -632,7 +542,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
|
||||
// Record release clock for next lock holder.
|
||||
if let Some(data_race) = &this.machine.data_race {
|
||||
rwlock.clock_unlocked.clone_from(&*data_race.release_clock(&this.machine.threads));
|
||||
data_race.release_clock(&this.machine.threads, |clock| {
|
||||
rwlock.clock_unlocked.clone_from(clock)
|
||||
});
|
||||
}
|
||||
// The thread was a writer.
|
||||
//
|
||||
@ -764,7 +676,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Each condvar signal happens-before the end of the condvar wake
|
||||
if let Some(data_race) = data_race {
|
||||
condvar.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
|
||||
data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
|
||||
}
|
||||
let Some(waiter) = condvar.waiters.pop_front() else {
|
||||
return interp_ok(false);
|
||||
@ -837,7 +749,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Each futex-wake happens-before the end of the futex wait
|
||||
if let Some(data_race) = data_race {
|
||||
futex.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
|
||||
data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
|
||||
}
|
||||
|
||||
// Wake up the first thread in the queue that matches any of the bits in the bitset.
|
||||
|
@ -50,7 +50,7 @@ pub trait UnblockCallback<'tcx>: VisitProvenance {
|
||||
fn timeout(self: Box<Self>, _ecx: &mut InterpCx<'tcx, MiriMachine<'tcx>>)
|
||||
-> InterpResult<'tcx>;
|
||||
}
|
||||
type DynUnblockCallback<'tcx> = Box<dyn UnblockCallback<'tcx> + 'tcx>;
|
||||
pub type DynUnblockCallback<'tcx> = Box<dyn UnblockCallback<'tcx> + 'tcx>;
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! callback {
|
||||
@ -59,7 +59,7 @@ macro_rules! callback {
|
||||
@unblock = |$this:ident| $unblock:block
|
||||
) => {
|
||||
callback!(
|
||||
@capture<$tcx, $($lft),*> { $($name: $type),+ }
|
||||
@capture<$tcx, $($lft),*> { $($name: $type),* }
|
||||
@unblock = |$this| $unblock
|
||||
@timeout = |_this| {
|
||||
unreachable!(
|
||||
@ -101,7 +101,7 @@ macro_rules! callback {
|
||||
}
|
||||
}
|
||||
|
||||
Callback { $($name,)* _phantom: std::marker::PhantomData }
|
||||
Box::new(Callback { $($name,)* _phantom: std::marker::PhantomData })
|
||||
}}
|
||||
}
|
||||
|
||||
@ -715,11 +715,11 @@ impl<'tcx> ThreadManager<'tcx> {
|
||||
&mut self,
|
||||
reason: BlockReason,
|
||||
timeout: Option<Timeout>,
|
||||
callback: impl UnblockCallback<'tcx> + 'tcx,
|
||||
callback: DynUnblockCallback<'tcx>,
|
||||
) {
|
||||
let state = &mut self.threads[self.active_thread].state;
|
||||
assert!(state.is_enabled());
|
||||
*state = ThreadState::Blocked { reason, timeout, callback: Box::new(callback) }
|
||||
*state = ThreadState::Blocked { reason, timeout, callback }
|
||||
}
|
||||
|
||||
/// Change the active thread to some enabled thread.
|
||||
@ -1032,7 +1032,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
&mut self,
|
||||
reason: BlockReason,
|
||||
timeout: Option<(TimeoutClock, TimeoutAnchor, Duration)>,
|
||||
callback: impl UnblockCallback<'tcx> + 'tcx,
|
||||
callback: DynUnblockCallback<'tcx>,
|
||||
) {
|
||||
let this = self.eval_context_mut();
|
||||
let timeout = timeout.map(|(clock, anchor, duration)| {
|
||||
|
@@ -129,8 +129,6 @@ pub struct MiriConfig {
/// If `Some`, enable the `measureme` profiler, writing results to a file
/// with the specified prefix.
pub measureme_out: Option<String>,
/// Panic when unsupported functionality is encountered.
pub panic_on_unsupported: bool,
/// Which style to use for printing backtraces.
pub backtrace_style: BacktraceStyle,
/// Which provenance to use for int2ptr casts
@@ -183,7 +181,6 @@ impl Default for MiriConfig {
track_outdated_loads: false,
cmpxchg_weak_failure_rate: 0.8, // 80%
measureme_out: None,
panic_on_unsupported: false,
backtrace_style: BacktraceStyle::Short,
provenance_mode: ProvenanceMode::Default,
mute_stdout_stderr: false,

@ -14,7 +14,6 @@ use rustc_index::IndexVec;
|
||||
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
|
||||
use rustc_middle::middle::dependency_format::Linkage;
|
||||
use rustc_middle::middle::exported_symbols::ExportedSymbol;
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, MaybeResult, TyAndLayout};
|
||||
use rustc_middle::ty::{self, FloatTy, IntTy, Ty, TyCtxt, UintTy};
|
||||
use rustc_session::config::CrateType;
|
||||
@ -224,14 +223,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
}
|
||||
|
||||
/// Evaluates the scalar at the specified path.
|
||||
fn eval_path(&self, path: &[&str]) -> OpTy<'tcx> {
|
||||
fn eval_path(&self, path: &[&str]) -> MPlaceTy<'tcx> {
|
||||
let this = self.eval_context_ref();
|
||||
let instance = resolve_path(*this.tcx, path, Namespace::ValueNS);
|
||||
// We don't give a span -- this isn't actually used directly by the program anyway.
|
||||
let const_val = this.eval_global(instance).unwrap_or_else(|err| {
|
||||
this.eval_global(instance).unwrap_or_else(|err| {
|
||||
panic!("failed to evaluate required Rust item: {path:?}\n{err:?}")
|
||||
});
|
||||
const_val.into()
|
||||
})
|
||||
}
|
||||
fn eval_path_scalar(&self, path: &[&str]) -> Scalar {
|
||||
let this = self.eval_context_ref();
|
||||
@ -949,21 +947,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
crate_name == "std" || crate_name == "std_miri_test"
|
||||
}
|
||||
|
||||
/// Handler that should be called when an unsupported foreign item is encountered.
|
||||
/// This function will either panic within the context of the emulated application
|
||||
/// or return an error in the Miri process context
|
||||
fn handle_unsupported_foreign_item(&mut self, error_msg: String) -> InterpResult<'tcx, ()> {
|
||||
let this = self.eval_context_mut();
|
||||
if this.machine.panic_on_unsupported {
|
||||
// message is slightly different here to make automated analysis easier
|
||||
let error_msg = format!("unsupported Miri functionality: {error_msg}");
|
||||
this.start_panic(error_msg.as_ref(), mir::UnwindAction::Continue)?;
|
||||
interp_ok(())
|
||||
} else {
|
||||
throw_machine_stop!(TerminationInfo::UnsupportedForeignItem(error_msg));
|
||||
}
|
||||
}
|
||||
|
||||
fn check_abi_and_shim_symbol_clash(
|
||||
&mut self,
|
||||
abi: Abi,
|
||||
@ -1196,6 +1179,21 @@ where
|
||||
throw_ub_format!("incorrect number of arguments: got {}, expected {}", args.len(), N)
|
||||
}
|
||||
|
||||
/// Check that the number of args is at least the minumim what we expect.
|
||||
pub fn check_min_arg_count<'a, 'tcx, const N: usize>(
|
||||
name: &'a str,
|
||||
args: &'a [OpTy<'tcx>],
|
||||
) -> InterpResult<'tcx, &'a [OpTy<'tcx>; N]> {
|
||||
if let Some((ops, _)) = args.split_first_chunk() {
|
||||
return interp_ok(ops);
|
||||
}
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `{name}`: got {}, expected at least {}",
|
||||
args.len(),
|
||||
N
|
||||
)
|
||||
}
|
||||
|
||||
pub fn isolation_abort_error<'tcx>(name: &str) -> InterpResult<'tcx> {
|
||||
throw_machine_stop!(TerminationInfo::UnsupportedInIsolation(format!(
|
||||
"{name} not available when isolation is enabled",
|
||||
|
@@ -301,6 +301,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let b = this.read_scalar(b)?.to_f32()?;
let c = this.read_scalar(c)?.to_f32()?;
let fuse: bool = this.machine.rng.get_mut().gen();
#[allow(clippy::arithmetic_side_effects)] // float ops don't overflow
let res = if fuse {
// FIXME: Using host floats, to work around https://github.com/rust-lang/rustc_apfloat/issues/11
a.to_host().mul_add(b.to_host(), c.to_host()).to_soft()
@@ -316,6 +317,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let b = this.read_scalar(b)?.to_f64()?;
let c = this.read_scalar(c)?.to_f64()?;
let fuse: bool = this.machine.rng.get_mut().gen();
#[allow(clippy::arithmetic_side_effects)] // float ops don't overflow
let res = if fuse {
// FIXME: Using host floats, to work around https://github.com/rust-lang/rustc_apfloat/issues/11
a.to_host().mul_add(b.to_host(), c.to_host()).to_soft()

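These hunks touch the shim that, per the surrounding context lines, randomly decides whether a
`mul_add` call is actually fused (one rounding) or evaluated as a separate multiply and add
(two roundings). A minimal sketch of what that nondeterminism means for user code, assuming that
behavior; the concrete numbers below are just a convenient case where the two results differ:

```rust
fn main() {
    // fl(1/3) is not exactly one third, so fusing matters:
    //   unfused: 3.0 * fl(1/3) rounds to exactly 1.0, and the sum is 0.0;
    //   fused:   3.0 * fl(1/3) - 1.0 is rounded only once and equals -2^-54.
    let x = 1.0_f64 / 3.0;
    let fused = 3.0_f64.mul_add(x, -1.0);
    let unfused = 3.0 * x - 1.0;
    println!("fused = {fused:e}, unfused = {unfused:e}");
    // Under the behavior sketched above, Miri may report either of these two values for
    // `mul_add` from run to run, so programs should not rely on one exact result.
}
```
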
@ -1,6 +1,7 @@
|
||||
//! Global machine state as well as implementation of the interpreter engine
|
||||
//! `Machine` trait.
|
||||
|
||||
use std::any::Any;
|
||||
use std::borrow::Cow;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::hash_map::Entry;
|
||||
@ -336,6 +337,11 @@ pub struct AllocExtra<'tcx> {
|
||||
/// if this allocation is leakable. The backtrace is not
|
||||
/// pruned yet; that should be done before printing it.
|
||||
pub backtrace: Option<Vec<FrameInfo<'tcx>>>,
|
||||
/// Synchronization primitives like to attach extra data to particular addresses. We store that
|
||||
/// inside the relevant allocation, to ensure that everything is removed when the allocation is
|
||||
/// freed.
|
||||
/// This maps offsets to synchronization-primitive-specific data.
|
||||
pub sync: FxHashMap<Size, Box<dyn Any>>,
|
||||
}
|
||||
|
||||
// We need a `Clone` impl because the machine passes `Allocation` through `Cow`...
|
||||
@ -348,7 +354,7 @@ impl<'tcx> Clone for AllocExtra<'tcx> {
|
||||
|
||||
impl VisitProvenance for AllocExtra<'_> {
|
||||
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
|
||||
let AllocExtra { borrow_tracker, data_race, weak_memory, backtrace: _ } = self;
|
||||
let AllocExtra { borrow_tracker, data_race, weak_memory, backtrace: _, sync: _ } = self;
|
||||
|
||||
borrow_tracker.visit_provenance(visit);
|
||||
data_race.visit_provenance(visit);
|
||||
@ -504,11 +510,6 @@ pub struct MiriMachine<'tcx> {
|
||||
/// `None` means no `Instance` exported under the given name is found.
|
||||
pub(crate) exported_symbols_cache: FxHashMap<Symbol, Option<Instance<'tcx>>>,
|
||||
|
||||
/// Whether to raise a panic in the context of the evaluated process when unsupported
|
||||
/// functionality is encountered. If `false`, an error is propagated in the Miri application context
|
||||
/// instead (default behavior)
|
||||
pub(crate) panic_on_unsupported: bool,
|
||||
|
||||
/// Equivalent setting as RUST_BACKTRACE on encountering an error.
|
||||
pub(crate) backtrace_style: BacktraceStyle,
|
||||
|
||||
@ -675,7 +676,6 @@ impl<'tcx> MiriMachine<'tcx> {
|
||||
profiler,
|
||||
string_cache: Default::default(),
|
||||
exported_symbols_cache: FxHashMap::default(),
|
||||
panic_on_unsupported: config.panic_on_unsupported,
|
||||
backtrace_style: config.backtrace_style,
|
||||
local_crates,
|
||||
extern_statics: FxHashMap::default(),
|
||||
@ -815,7 +815,6 @@ impl VisitProvenance for MiriMachine<'_> {
|
||||
profiler: _,
|
||||
string_cache: _,
|
||||
exported_symbols_cache: _,
|
||||
panic_on_unsupported: _,
|
||||
backtrace_style: _,
|
||||
local_crates: _,
|
||||
rng: _,
|
||||
@ -1194,7 +1193,13 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> {
|
||||
.insert(id, (ecx.machine.current_span(), None));
|
||||
}
|
||||
|
||||
interp_ok(AllocExtra { borrow_tracker, data_race, weak_memory, backtrace })
|
||||
interp_ok(AllocExtra {
|
||||
borrow_tracker,
|
||||
data_race,
|
||||
weak_memory,
|
||||
backtrace,
|
||||
sync: FxHashMap::default(),
|
||||
})
|
||||
}
|
||||
|
||||
fn adjust_alloc_root_pointer(
|
||||
|
@ -5,6 +5,7 @@ use rustc_span::{BytePos, Loc, Symbol, hygiene};
|
||||
use rustc_target::abi::Size;
|
||||
use rustc_target::spec::abi::Abi;
|
||||
|
||||
use crate::helpers::check_min_arg_count;
|
||||
use crate::*;
|
||||
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
@ -39,11 +40,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
let tcx = this.tcx;
|
||||
|
||||
let flags = if let Some(flags_op) = args.first() {
|
||||
this.read_scalar(flags_op)?.to_u64()?
|
||||
} else {
|
||||
throw_ub_format!("expected at least 1 argument")
|
||||
};
|
||||
let [flags] = check_min_arg_count("miri_get_backtrace", args)?;
|
||||
let flags = this.read_scalar(flags)?.to_u64()?;
|
||||
|
||||
let mut data = Vec::new();
|
||||
for frame in this.active_thread_stack().iter().rev() {
|
||||
|
@ -83,11 +83,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
return interp_ok(Some(body));
|
||||
}
|
||||
|
||||
this.handle_unsupported_foreign_item(format!(
|
||||
throw_machine_stop!(TerminationInfo::UnsupportedForeignItem(format!(
|
||||
"can't call foreign function `{link_name}` on OS `{os}`",
|
||||
os = this.tcx.sess.target.os,
|
||||
))?;
|
||||
return interp_ok(None);
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -9,6 +9,7 @@ use std::rc::{Rc, Weak};
|
||||
|
||||
use rustc_target::abi::Size;
|
||||
|
||||
use crate::helpers::check_min_arg_count;
|
||||
use crate::shims::unix::linux::epoll::EpollReadyEvents;
|
||||
use crate::shims::unix::*;
|
||||
use crate::*;
|
||||
@ -481,56 +482,62 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn fcntl(&mut self, args: &[OpTy<'tcx>]) -> InterpResult<'tcx, Scalar> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let [fd_num, cmd, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for fcntl: got {}, expected at least 2",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let [fd_num, cmd] = check_min_arg_count("fcntl", args)?;
|
||||
|
||||
let fd_num = this.read_scalar(fd_num)?.to_i32()?;
|
||||
let cmd = this.read_scalar(cmd)?.to_i32()?;
|
||||
|
||||
let f_getfd = this.eval_libc_i32("F_GETFD");
|
||||
let f_dupfd = this.eval_libc_i32("F_DUPFD");
|
||||
let f_dupfd_cloexec = this.eval_libc_i32("F_DUPFD_CLOEXEC");
|
||||
|
||||
// We only support getting the flags for a descriptor.
|
||||
if cmd == this.eval_libc_i32("F_GETFD") {
|
||||
// Currently this is the only flag that `F_GETFD` returns. It is OK to just return the
|
||||
// `FD_CLOEXEC` value without checking if the flag is set for the file because `std`
|
||||
// always sets this flag when opening a file. However we still need to check that the
|
||||
// file itself is open.
|
||||
interp_ok(Scalar::from_i32(if this.machine.fds.is_fd_num(fd_num) {
|
||||
this.eval_libc_i32("FD_CLOEXEC")
|
||||
} else {
|
||||
this.fd_not_found()?
|
||||
}))
|
||||
} else if cmd == this.eval_libc_i32("F_DUPFD")
|
||||
|| cmd == this.eval_libc_i32("F_DUPFD_CLOEXEC")
|
||||
{
|
||||
// Note that we always assume the FD_CLOEXEC flag is set for every open file, in part
|
||||
// because exec() isn't supported. The F_DUPFD and F_DUPFD_CLOEXEC commands only
|
||||
// differ in whether the FD_CLOEXEC flag is pre-set on the new file descriptor,
|
||||
// thus they can share the same implementation here.
|
||||
let [_, _, start, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for fcntl with cmd=`F_DUPFD`/`F_DUPFD_CLOEXEC`: got {}, expected at least 3",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let start = this.read_scalar(start)?.to_i32()?;
|
||||
|
||||
match this.machine.fds.get(fd_num) {
|
||||
Some(fd) =>
|
||||
interp_ok(Scalar::from_i32(this.machine.fds.insert_with_min_num(fd, start))),
|
||||
None => interp_ok(Scalar::from_i32(this.fd_not_found()?)),
|
||||
}
|
||||
} else if this.tcx.sess.target.os == "macos" && cmd == this.eval_libc_i32("F_FULLFSYNC") {
|
||||
// Reject if isolation is enabled.
|
||||
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
|
||||
this.reject_in_isolation("`fcntl`", reject_with)?;
|
||||
return this.set_last_error_and_return_i32(ErrorKind::PermissionDenied);
|
||||
match cmd {
|
||||
cmd if cmd == f_getfd => {
|
||||
// Currently this is the only flag that `F_GETFD` returns. It is OK to just return the
|
||||
// `FD_CLOEXEC` value without checking if the flag is set for the file because `std`
|
||||
// always sets this flag when opening a file. However we still need to check that the
|
||||
// file itself is open.
|
||||
interp_ok(Scalar::from_i32(if this.machine.fds.is_fd_num(fd_num) {
|
||||
this.eval_libc_i32("FD_CLOEXEC")
|
||||
} else {
|
||||
this.fd_not_found()?
|
||||
}))
|
||||
}
|
||||
cmd if cmd == f_dupfd || cmd == f_dupfd_cloexec => {
|
||||
// Note that we always assume the FD_CLOEXEC flag is set for every open file, in part
|
||||
// because exec() isn't supported. The F_DUPFD and F_DUPFD_CLOEXEC commands only
|
||||
// differ in whether the FD_CLOEXEC flag is pre-set on the new file descriptor,
|
||||
// thus they can share the same implementation here.
|
||||
let cmd_name = if cmd == f_dupfd {
|
||||
"fcntl(fd, F_DUPFD, ...)"
|
||||
} else {
|
||||
"fcntl(fd, F_DUPFD_CLOEXEC, ...)"
|
||||
};
|
||||
|
||||
this.ffullsync_fd(fd_num)
|
||||
} else {
|
||||
throw_unsup_format!("the {:#x} command is not supported for `fcntl`)", cmd);
|
||||
let [_, _, start] = check_min_arg_count(cmd_name, args)?;
|
||||
let start = this.read_scalar(start)?.to_i32()?;
|
||||
|
||||
if let Some(fd) = this.machine.fds.get(fd_num) {
|
||||
interp_ok(Scalar::from_i32(this.machine.fds.insert_with_min_num(fd, start)))
|
||||
} else {
|
||||
interp_ok(Scalar::from_i32(this.fd_not_found()?))
|
||||
}
|
||||
}
|
||||
cmd if this.tcx.sess.target.os == "macos"
|
||||
&& cmd == this.eval_libc_i32("F_FULLFSYNC") =>
|
||||
{
|
||||
// Reject if isolation is enabled.
|
||||
if let IsolatedOp::Reject(reject_with) = this.machine.isolated_op {
|
||||
this.reject_in_isolation("`fcntl`", reject_with)?;
|
||||
return this.set_last_error_and_return_i32(ErrorKind::PermissionDenied);
|
||||
}
|
||||
|
||||
this.ffullsync_fd(fd_num)
|
||||
}
|
||||
cmd => {
|
||||
throw_unsup_format!("fcntl: unsupported command {cmd:#x}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -292,6 +292,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
this.write_scalar(result, dest)?;
|
||||
}
|
||||
"pipe2" => {
|
||||
// Currently this function does not exist on all Unixes, e.g. on macOS.
|
||||
if !matches!(&*this.tcx.sess.target.os, "linux" | "freebsd" | "solaris" | "illumos") {
|
||||
throw_unsup_format!(
|
||||
"`pipe2` is not supported on {}",
|
||||
this.tcx.sess.target.os
|
||||
);
|
||||
}
|
||||
let [pipefd, flags] =
|
||||
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
let result = this.pipe2(pipefd, Some(flags))?;
|
||||
|
@ -34,11 +34,15 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
"pthread_get_name_np" => {
|
||||
let [thread, name, len] =
|
||||
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
// FreeBSD's pthread_get_name_np does not return anything.
|
||||
// FreeBSD's pthread_get_name_np does not return anything
|
||||
// and uses strlcpy, which truncates the resulting value,
|
||||
// but always adds a null terminator (except for zero-sized buffers).
|
||||
// https://github.com/freebsd/freebsd-src/blob/c2d93a803acef634bd0eede6673aeea59e90c277/lib/libthr/thread/thr_info.c#L119-L144
|
||||
this.pthread_getname_np(
|
||||
this.read_scalar(thread)?,
|
||||
this.read_scalar(name)?,
|
||||
this.read_scalar(len)?,
|
||||
/* truncate */ true,
|
||||
)?;
|
||||
}
|
||||
|
||||
|
@ -13,6 +13,7 @@ use rustc_target::abi::Size;
|
||||
|
||||
use self::fd::FlockOp;
|
||||
use self::shims::time::system_time_to_duration;
|
||||
use crate::helpers::check_min_arg_count;
|
||||
use crate::shims::os_str::bytes_to_os_str;
|
||||
use crate::shims::unix::fd::FileDescriptionRef;
|
||||
use crate::shims::unix::*;
|
||||
@ -433,12 +434,7 @@ fn maybe_sync_file(
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn open(&mut self, args: &[OpTy<'tcx>]) -> InterpResult<'tcx, Scalar> {
|
||||
let [path_raw, flag, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `open`: got {}, expected at least 2",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let [path_raw, flag] = check_min_arg_count("open", args)?;
|
||||
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
@ -492,14 +488,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
// Get the mode. On macOS, the argument type `mode_t` is actually `u16`, but
|
||||
// C integer promotion rules mean that on the ABI level, it gets passed as `u32`
|
||||
// (see https://github.com/rust-lang/rust/issues/71915).
|
||||
let mode = if let Some(arg) = args.get(2) {
|
||||
this.read_scalar(arg)?.to_u32()?
|
||||
} else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `open` with `O_CREAT`: got {}, expected at least 3",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let [_, _, mode] = check_min_arg_count("open(pathname, O_CREAT, ...)", args)?;
|
||||
let mode = this.read_scalar(mode)?.to_u32()?;
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
|
@ -32,11 +32,13 @@ pub struct EpollEventInstance {
|
||||
events: u32,
|
||||
/// Original data retrieved from `epoll_event` during `epoll_ctl`.
|
||||
data: u64,
|
||||
/// The release clock associated with this event.
|
||||
clock: VClock,
|
||||
}
|
||||
|
||||
impl EpollEventInstance {
|
||||
pub fn new(events: u32, data: u64) -> EpollEventInstance {
|
||||
EpollEventInstance { events, data }
|
||||
EpollEventInstance { events, data, clock: Default::default() }
|
||||
}
|
||||
}
|
||||
|
||||
@ -92,7 +94,6 @@ pub struct EpollReadyEvents {
|
||||
#[derive(Debug, Default)]
|
||||
struct ReadyList {
|
||||
mapping: RefCell<BTreeMap<(FdId, i32), EpollEventInstance>>,
|
||||
clock: RefCell<VClock>,
|
||||
}
|
||||
|
||||
impl EpollReadyEvents {
|
||||
@ -480,7 +481,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
}
|
||||
if timeout == 0 || !ready_list_empty {
|
||||
// If the ready list is not empty, or the timeout is 0, we can return immediately.
|
||||
blocking_epoll_callback(epfd_value, weak_epfd, dest, &event, this)?;
|
||||
return_ready_list(epfd_value, weak_epfd, dest, &event, this)?;
|
||||
} else {
|
||||
// Blocking
|
||||
let timeout = match timeout {
|
||||
@ -508,7 +509,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
event: MPlaceTy<'tcx>,
|
||||
}
|
||||
@unblock = |this| {
|
||||
blocking_epoll_callback(epfd_value, weak_epfd, &dest, &event, this)?;
|
||||
return_ready_list(epfd_value, weak_epfd, &dest, &event, this)?;
|
||||
interp_ok(())
|
||||
}
|
||||
@timeout = |this| {
|
||||
@ -567,11 +568,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
let epoll = epfd.downcast::<Epoll>().unwrap();
|
||||
|
||||
// Synchronize running thread to the epoll ready list.
|
||||
if let Some(clock) = &this.release_clock() {
|
||||
epoll.ready_list.clock.borrow_mut().join(clock);
|
||||
}
|
||||
|
||||
if let Some(thread_id) = epoll.thread_id.borrow_mut().pop() {
|
||||
waiter.push(thread_id);
|
||||
};
|
||||
@ -627,7 +623,11 @@ fn check_and_update_one_event_interest<'tcx>(
|
||||
if flags != 0 {
|
||||
let epoll_key = (id, epoll_event_interest.fd_num);
|
||||
let ready_list = &mut epoll_event_interest.ready_list.mapping.borrow_mut();
|
||||
let event_instance = EpollEventInstance::new(flags, epoll_event_interest.data);
|
||||
let mut event_instance = EpollEventInstance::new(flags, epoll_event_interest.data);
|
||||
// If we are tracking data races, remember the current clock so we can sync with it later.
|
||||
ecx.release_clock(|clock| {
|
||||
event_instance.clock.clone_from(clock);
|
||||
});
|
||||
// Triggers the notification by inserting it into the ready list.
|
||||
ready_list.insert(epoll_key, event_instance);
|
||||
interp_ok(true)
|
||||
@ -636,8 +636,9 @@ fn check_and_update_one_event_interest<'tcx>(
|
||||
}
|
||||
}
|
||||
|
||||
/// Callback function after epoll_wait unblocks
|
||||
fn blocking_epoll_callback<'tcx>(
|
||||
/// Stores the ready list of the `epfd` epoll instance into `events` (which must be an array),
|
||||
/// and the number of returned events into `dest`.
|
||||
fn return_ready_list<'tcx>(
|
||||
epfd_value: i32,
|
||||
weak_epfd: WeakFileDescriptionRef,
|
||||
dest: &MPlaceTy<'tcx>,
|
||||
@ -654,9 +655,6 @@ fn blocking_epoll_callback<'tcx>(
|
||||
|
||||
let ready_list = epoll_file_description.get_ready_list();
|
||||
|
||||
// Synchronize waking thread from the epoll ready list.
|
||||
ecx.acquire_clock(&ready_list.clock.borrow());
|
||||
|
||||
let mut ready_list = ready_list.mapping.borrow_mut();
|
||||
let mut num_of_events: i32 = 0;
|
||||
let mut array_iter = ecx.project_array_fields(events)?;
|
||||
@ -670,6 +668,9 @@ fn blocking_epoll_callback<'tcx>(
|
||||
],
|
||||
&des.1,
|
||||
)?;
|
||||
// Synchronize waking thread with the event of interest.
|
||||
ecx.acquire_clock(&epoll_event_instance.clock);
|
||||
|
||||
num_of_events = num_of_events.strict_add(1);
|
||||
} else {
|
||||
break;
|
||||
|
@ -140,9 +140,9 @@ impl FileDescription for Event {
|
||||
match self.counter.get().checked_add(num) {
|
||||
Some(new_count @ 0..=MAX_COUNTER) => {
|
||||
// Future `read` calls will synchronize with this write, so update the FD clock.
|
||||
if let Some(clock) = &ecx.release_clock() {
|
||||
ecx.release_clock(|clock| {
|
||||
self.clock.borrow_mut().join(clock);
|
||||
}
|
||||
});
|
||||
self.counter.set(new_count);
|
||||
}
|
||||
None | Some(u64::MAX) =>
|
||||
|
@ -5,10 +5,16 @@ use self::shims::unix::linux::epoll::EvalContextExt as _;
|
||||
use self::shims::unix::linux::eventfd::EvalContextExt as _;
|
||||
use self::shims::unix::linux::mem::EvalContextExt as _;
|
||||
use self::shims::unix::linux::sync::futex;
|
||||
use crate::helpers::check_min_arg_count;
|
||||
use crate::machine::{SIGRTMAX, SIGRTMIN};
|
||||
use crate::shims::unix::*;
|
||||
use crate::*;
|
||||
|
||||
// The documentation of glibc complains that the kernel never exposes
|
||||
// TASK_COMM_LEN through the headers, so it's assumed to always be 16 bytes
|
||||
// long including a null terminator.
|
||||
const TASK_COMM_LEN: usize = 16;
|
||||
|
||||
pub fn is_dyn_sym(name: &str) -> bool {
|
||||
matches!(name, "statx")
|
||||
}
|
||||
@ -74,22 +80,32 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
"pthread_setname_np" => {
|
||||
let [thread, name] =
|
||||
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
let max_len = 16;
|
||||
let res = this.pthread_setname_np(
|
||||
this.read_scalar(thread)?,
|
||||
this.read_scalar(name)?,
|
||||
max_len,
|
||||
TASK_COMM_LEN,
|
||||
)?;
|
||||
let res = if res { Scalar::from_u32(0) } else { this.eval_libc("ERANGE") };
|
||||
this.write_scalar(res, dest)?;
|
||||
}
|
||||
"pthread_getname_np" => {
|
||||
let [thread, name, len] =
|
||||
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
let res = this.pthread_getname_np(
|
||||
this.read_scalar(thread)?,
|
||||
this.read_scalar(name)?,
|
||||
this.read_scalar(len)?,
|
||||
)?;
|
||||
// The function's behavior isn't portable between platforms.
|
||||
// In case of glibc, the length of the output buffer must
|
||||
// not be shorter than TASK_COMM_LEN.
|
||||
let len = this.read_scalar(len)?;
|
||||
let res = if len.to_target_usize(this)? >= TASK_COMM_LEN as u64
|
||||
&& this.pthread_getname_np(
|
||||
this.read_scalar(thread)?,
|
||||
this.read_scalar(name)?,
|
||||
len,
|
||||
/* truncate */ false,
|
||||
)? {
|
||||
Scalar::from_u32(0)
|
||||
} else {
|
||||
this.eval_libc("ERANGE")
|
||||
};
|
||||
this.write_scalar(res, dest)?;
|
||||
}
|
||||
"gettid" => {
|
||||
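For context, a hedged sketch of the guest-side usage this shim models, assuming glibc on Linux; the 16-byte `TASK_COMM_LEN` limit (including the NUL terminator) is what makes `pthread_setname_np` report `ERANGE`:

```rust
use std::ffi::CString;

// Illustrative only: with glibc, this returns ERANGE once the name (including
// its NUL terminator) no longer fits into TASK_COMM_LEN (16) bytes.
fn set_thread_name(name: &str) -> libc::c_int {
    let name = CString::new(name).unwrap();
    unsafe { libc::pthread_setname_np(libc::pthread_self(), name.as_ptr()) }
}
```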
@ -110,24 +126,17 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
let sys_getrandom = this.eval_libc("SYS_getrandom").to_target_usize(this)?;
|
||||
let sys_futex = this.eval_libc("SYS_futex").to_target_usize(this)?;
|
||||
let sys_eventfd2 = this.eval_libc("SYS_eventfd2").to_target_usize(this)?;
|
||||
|
||||
if args.is_empty() {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for syscall: got 0, expected at least 1"
|
||||
);
|
||||
}
|
||||
match this.read_target_usize(&args[0])? {
|
||||
let [op] = check_min_arg_count("syscall", args)?;
|
||||
match this.read_target_usize(op)? {
|
||||
// `libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)`
|
||||
// is called if a `HashMap` is created the regular way (e.g. HashMap<K, V>).
|
||||
id if id == sys_getrandom => {
|
||||
num if num == sys_getrandom => {
|
||||
// Used by getrandom 0.1
|
||||
// The first argument is the syscall id, so skip over it.
|
||||
let [_, ptr, len, flags, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `getrandom` syscall: got {}, expected at least 4",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let [_, ptr, len, flags] =
|
||||
check_min_arg_count("syscall(SYS_getrandom, ...)", args)?;
|
||||
|
||||
let ptr = this.read_pointer(ptr)?;
|
||||
let len = this.read_target_usize(len)?;
|
||||
@ -140,14 +149,18 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
this.write_scalar(Scalar::from_target_usize(len, this), dest)?;
|
||||
}
|
||||
// `futex` is used by some synchronization primitives.
|
||||
id if id == sys_futex => {
|
||||
futex(this, &args[1..], dest)?;
|
||||
num if num == sys_futex => {
|
||||
futex(this, args, dest)?;
|
||||
}
|
||||
id => {
|
||||
this.handle_unsupported_foreign_item(format!(
|
||||
"can't execute syscall with ID {id}"
|
||||
))?;
|
||||
return interp_ok(EmulateItemResult::AlreadyJumped);
|
||||
num if num == sys_eventfd2 => {
|
||||
let [_, initval, flags] =
|
||||
check_min_arg_count("syscall(SYS_evetfd2, ...)", args)?;
|
||||
|
||||
let result = this.eventfd(initval, flags)?;
|
||||
this.write_int(result.to_i32()?, dest)?;
|
||||
}
|
||||
num => {
|
||||
throw_unsup_format!("syscall: unsupported syscall number {num}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,7 +1,8 @@
|
||||
use crate::helpers::check_min_arg_count;
|
||||
use crate::*;
|
||||
|
||||
/// Implementation of the SYS_futex syscall.
|
||||
/// `args` is the arguments *after* the syscall number.
|
||||
/// `args` is the arguments *including* the syscall number.
|
||||
pub fn futex<'tcx>(
|
||||
this: &mut MiriInterpCx<'tcx>,
|
||||
args: &[OpTy<'tcx>],
|
||||
@ -15,12 +16,7 @@ pub fn futex<'tcx>(
|
||||
// may or may not be left out from the `syscall()` call.
|
||||
// Therefore we don't use `check_arg_count` here, but only check for the
|
||||
// number of arguments to fall within a range.
|
||||
let [addr, op, val, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `futex` syscall: got {}, expected at least 3",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let [_, addr, op, val] = check_min_arg_count("`syscall(SYS_futex, ...)`", args)?;
|
||||
|
||||
// The first three arguments (after the syscall number itself) are the same for all futex operations:
|
||||
// (int *addr, int op, int val).
|
||||
@ -54,24 +50,16 @@ pub fn futex<'tcx>(
|
||||
op if op & !futex_realtime == futex_wait || op & !futex_realtime == futex_wait_bitset => {
|
||||
let wait_bitset = op & !futex_realtime == futex_wait_bitset;
|
||||
|
||||
let bitset = if wait_bitset {
|
||||
let [_, _, _, timeout, uaddr2, bitset, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `futex` syscall with `op=FUTEX_WAIT_BITSET`: got {}, expected at least 6",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let (timeout, bitset) = if wait_bitset {
|
||||
let [_, _, _, _, timeout, uaddr2, bitset] =
|
||||
check_min_arg_count("`syscall(SYS_futex, FUTEX_WAIT_BITSET, ...)`", args)?;
|
||||
let _timeout = this.read_pointer(timeout)?;
|
||||
let _uaddr2 = this.read_pointer(uaddr2)?;
|
||||
this.read_scalar(bitset)?.to_u32()?
|
||||
(timeout, this.read_scalar(bitset)?.to_u32()?)
|
||||
} else {
|
||||
if args.len() < 4 {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `futex` syscall with `op=FUTEX_WAIT`: got {}, expected at least 4",
|
||||
args.len()
|
||||
);
|
||||
}
|
||||
u32::MAX
|
||||
let [_, _, _, _, timeout] =
|
||||
check_min_arg_count("`syscall(SYS_futex, FUTEX_WAIT, ...)`", args)?;
|
||||
(timeout, u32::MAX)
|
||||
};
|
||||
|
||||
if bitset == 0 {
|
||||
@ -80,7 +68,7 @@ pub fn futex<'tcx>(
|
||||
return interp_ok(());
|
||||
}
|
||||
|
||||
let timeout = this.deref_pointer_as(&args[3], this.libc_ty_layout("timespec"))?;
|
||||
let timeout = this.deref_pointer_as(timeout, this.libc_ty_layout("timespec"))?;
|
||||
let timeout = if this.ptr_is_null(timeout.ptr())? {
|
||||
None
|
||||
} else {
|
||||
@ -183,12 +171,8 @@ pub fn futex<'tcx>(
|
||||
// Same as FUTEX_WAKE, but allows you to specify a bitset to select which threads to wake up.
|
||||
op if op == futex_wake || op == futex_wake_bitset => {
|
||||
let bitset = if op == futex_wake_bitset {
|
||||
let [_, _, _, timeout, uaddr2, bitset, ..] = args else {
|
||||
throw_ub_format!(
|
||||
"incorrect number of arguments for `futex` syscall with `op=FUTEX_WAKE_BITSET`: got {}, expected at least 6",
|
||||
args.len()
|
||||
);
|
||||
};
|
||||
let [_, _, _, _, timeout, uaddr2, bitset] =
|
||||
check_min_arg_count("`syscall(SYS_futex, FUTEX_WAKE_BITSET, ...)`", args)?;
|
||||
let _timeout = this.read_pointer(timeout)?;
|
||||
let _uaddr2 = this.read_pointer(uaddr2)?;
|
||||
this.read_scalar(bitset)?.to_u32()?
|
||||
|
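To illustrate the argument layout the shim now expects (the syscall number is included in `args`), a sketch of how guest code typically issues a `FUTEX_WAIT`, assuming Linux and the `libc` crate; the helper and its name are illustrative only:

```rust
use std::ptr;
use std::sync::atomic::AtomicU32;

// Illustrative only: block until `futex_word` no longer holds `expected`.
unsafe fn futex_wait(futex_word: &AtomicU32, expected: u32) -> libc::c_long {
    libc::syscall(
        libc::SYS_futex,                // syscall number; the shim receives it as part of `args`
        futex_word as *const AtomicU32, // int *addr
        libc::FUTEX_WAIT,               // int op
        expected,                       // int val
        ptr::null::<libc::timespec>(),  // no timeout
    )
}
```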
@ -164,13 +164,28 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
// Threading
|
||||
"pthread_setname_np" => {
|
||||
let [name] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
|
||||
// The real implementation has logic in two places:
|
||||
// * in userland at https://github.com/apple-oss-distributions/libpthread/blob/c032e0b076700a0a47db75528a282b8d3a06531a/src/pthread.c#L1178-L1200,
|
||||
// * in kernel at https://github.com/apple-oss-distributions/xnu/blob/8d741a5de7ff4191bf97d57b9f54c2f6d4a15585/bsd/kern/proc_info.c#L3218-L3227.
|
||||
//
|
||||
// The function in libc calls the kernel to validate
|
||||
// the security policies and the input. If all of the requirements
|
||||
// are met, then the name is set and 0 is returned. Otherwise, if
|
||||
// the specified name is longer than MAXTHREADNAMESIZE, then
|
||||
// ENAMETOOLONG is returned.
|
||||
//
|
||||
// FIXME: the real implementation may return ESRCH if the thread ID is invalid.
|
||||
let thread = this.pthread_self()?;
|
||||
let max_len = this.eval_libc("MAXTHREADNAMESIZE").to_target_usize(this)?;
|
||||
let res = this.pthread_setname_np(
|
||||
let res = if this.pthread_setname_np(
|
||||
thread,
|
||||
this.read_scalar(name)?,
|
||||
max_len.try_into().unwrap(),
|
||||
)?;
|
||||
this.eval_libc("MAXTHREADNAMESIZE").to_target_usize(this)?.try_into().unwrap(),
|
||||
)? {
|
||||
Scalar::from_u32(0)
|
||||
} else {
|
||||
this.eval_libc("ENAMETOOLONG")
|
||||
};
|
||||
// Contrary to the manpage, `pthread_setname_np` on macOS still
|
||||
// returns an integer indicating success.
|
||||
this.write_scalar(res, dest)?;
|
||||
@ -178,10 +193,23 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
"pthread_getname_np" => {
|
||||
let [thread, name, len] =
|
||||
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
let res = this.pthread_getname_np(
|
||||
|
||||
// The function's behavior isn't portable between platforms.
|
||||
// In case of macOS, a truncated name (due to a too small buffer)
|
||||
// does not lead to an error.
|
||||
//
|
||||
// For details, see the implementation at
|
||||
// https://github.com/apple-oss-distributions/libpthread/blob/c032e0b076700a0a47db75528a282b8d3a06531a/src/pthread.c#L1160-L1175.
|
||||
// The key part is the strlcpy, which truncates the resulting value,
|
||||
// but always null-terminates (except for zero-sized buffers).
|
||||
//
|
||||
// FIXME: the real implementation returns ESRCH if the thread ID is invalid.
|
||||
let res = Scalar::from_u32(0);
|
||||
this.pthread_getname_np(
|
||||
this.read_scalar(thread)?,
|
||||
this.read_scalar(name)?,
|
||||
this.read_scalar(len)?,
|
||||
/* truncate */ true,
|
||||
)?;
|
||||
this.write_scalar(res, dest)?;
|
||||
}
|
||||
|
@ -10,17 +10,42 @@
|
||||
//! and we do not detect copying of the lock, but macOS doesn't guarantee anything
|
||||
//! in that case either.
|
||||
|
||||
use rustc_target::abi::Size;
|
||||
|
||||
use crate::*;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
enum MacOsUnfairLock {
|
||||
Poisoned,
|
||||
Active { id: MutexId },
|
||||
}
|
||||
|
||||
impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn os_unfair_lock_getid(&mut self, lock_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, MutexId> {
|
||||
fn os_unfair_lock_get_data(
|
||||
&mut self,
|
||||
lock_ptr: &OpTy<'tcx>,
|
||||
) -> InterpResult<'tcx, MacOsUnfairLock> {
|
||||
let this = self.eval_context_mut();
|
||||
let lock = this.deref_pointer(lock_ptr)?;
|
||||
// os_unfair_lock holds a 32-bit value, is initialized with zero and
|
||||
// must be assumed to be opaque. Therefore, we can just store our
|
||||
// internal mutex ID in the structure without anyone noticing.
|
||||
this.mutex_get_or_create_id(&lock, 0, |_| interp_ok(None))
|
||||
this.lazy_sync_get_data(
|
||||
&lock,
|
||||
Size::ZERO, // offset for init tracking
|
||||
|| {
|
||||
// If we get here, due to how we reset things to zero in `os_unfair_lock_unlock`,
|
||||
// this means the lock was moved while locked. This can happen with a `std` lock,
|
||||
// but then any future attempt to unlock will just deadlock. In practice, terrible
|
||||
// things can probably happen if you swap two locked locks, since they'd wake up
|
||||
// from the wrong queue... we just won't catch all UB of this library API then (we
|
||||
// would need to store some unique identifier in-memory for this, instead of a static
|
||||
// LAZY_INIT_COOKIE). This can't be hit via `std::sync::Mutex`.
|
||||
interp_ok(MacOsUnfairLock::Poisoned)
|
||||
},
|
||||
|ecx| {
|
||||
let id = ecx.machine.sync.mutex_create();
|
||||
interp_ok(MacOsUnfairLock::Active { id })
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@ -29,7 +54,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn os_unfair_lock_lock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = this.os_unfair_lock_getid(lock_op)?;
|
||||
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
|
||||
// Trying to get a poisoned lock. Just block forever...
|
||||
this.block_thread(
|
||||
BlockReason::Sleep,
|
||||
None,
|
||||
callback!(
|
||||
@capture<'tcx> {}
|
||||
@unblock = |_this| {
|
||||
panic!("we shouldn't wake up ever")
|
||||
}
|
||||
),
|
||||
);
|
||||
return interp_ok(());
|
||||
};
|
||||
|
||||
if this.mutex_is_locked(id) {
|
||||
if this.mutex_get_owner(id) == this.active_thread() {
|
||||
// Matching the current macOS implementation: abort on reentrant locking.
|
||||
@ -53,7 +92,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = this.os_unfair_lock_getid(lock_op)?;
|
||||
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
|
||||
// Trying to get a poisoned lock. That never works.
|
||||
this.write_scalar(Scalar::from_bool(false), dest)?;
|
||||
return interp_ok(());
|
||||
};
|
||||
|
||||
if this.mutex_is_locked(id) {
|
||||
// Contrary to the blocking lock function, this does not check for
|
||||
// reentrancy.
|
||||
@ -69,7 +113,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn os_unfair_lock_unlock(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = this.os_unfair_lock_getid(lock_op)?;
|
||||
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
|
||||
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
|
||||
throw_machine_stop!(TerminationInfo::Abort(
|
||||
"attempted to unlock an os_unfair_lock not owned by the current thread".to_owned()
|
||||
));
|
||||
};
|
||||
|
||||
// Now, unlock.
|
||||
if this.mutex_unlock(id)?.is_none() {
|
||||
// Matching the current macOS implementation: abort.
|
||||
throw_machine_stop!(TerminationInfo::Abort(
|
||||
@ -77,32 +128,56 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
));
|
||||
}
|
||||
|
||||
// If the lock is not locked by anyone now, it went quiet.
|
||||
// Reset to zero so that it can be moved and initialized again for the next phase.
|
||||
if !this.mutex_is_locked(id) {
|
||||
let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
|
||||
this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
|
||||
}
|
||||
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn os_unfair_lock_assert_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = this.os_unfair_lock_getid(lock_op)?;
|
||||
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
|
||||
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
|
||||
throw_machine_stop!(TerminationInfo::Abort(
|
||||
"called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
|
||||
));
|
||||
};
|
||||
if !this.mutex_is_locked(id) || this.mutex_get_owner(id) != this.active_thread() {
|
||||
throw_machine_stop!(TerminationInfo::Abort(
|
||||
"called os_unfair_lock_assert_owner on an os_unfair_lock not owned by the current thread".to_owned()
|
||||
));
|
||||
}
|
||||
|
||||
// The lock is definitely not quiet since we are the owner.
|
||||
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn os_unfair_lock_assert_not_owner(&mut self, lock_op: &OpTy<'tcx>) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = this.os_unfair_lock_getid(lock_op)?;
|
||||
let MacOsUnfairLock::Active { id } = this.os_unfair_lock_get_data(lock_op)? else {
|
||||
// The lock is poisoned, who knows who owns it... we'll pretend: someone else.
|
||||
return interp_ok(());
|
||||
};
|
||||
if this.mutex_is_locked(id) && this.mutex_get_owner(id) == this.active_thread() {
|
||||
throw_machine_stop!(TerminationInfo::Abort(
|
||||
"called os_unfair_lock_assert_not_owner on an os_unfair_lock owned by the current thread".to_owned()
|
||||
));
|
||||
}
|
||||
|
||||
// If the lock is not locked by anyone now, it went quiet.
|
||||
// Reset to zero so that it can be moved and initialized again for the next phase.
|
||||
if !this.mutex_is_locked(id) {
|
||||
let lock_place = this.deref_pointer_as(lock_op, this.machine.layouts.u32)?;
|
||||
this.write_scalar_atomic(Scalar::from_u32(0), &lock_place, AtomicWriteOrd::Relaxed)?;
|
||||
}
|
||||
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
@ -31,16 +31,20 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
this.read_scalar(name)?,
|
||||
max_len,
|
||||
)?;
|
||||
let res = if res { Scalar::from_u32(0) } else { this.eval_libc("ERANGE") };
|
||||
this.write_scalar(res, dest)?;
|
||||
}
|
||||
"pthread_getname_np" => {
|
||||
let [thread, name, len] =
|
||||
this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
|
||||
// https://github.com/illumos/illumos-gate/blob/c56822be04b6c157c8b6f2281e47214c3b86f657/usr/src/lib/libc/port/threads/thr.c#L2449-L2480
|
||||
let res = this.pthread_getname_np(
|
||||
this.read_scalar(thread)?,
|
||||
this.read_scalar(name)?,
|
||||
this.read_scalar(len)?,
|
||||
/* truncate */ false,
|
||||
)?;
|
||||
let res = if res { Scalar::from_u32(0) } else { this.eval_libc("ERANGE") };
|
||||
this.write_scalar(res, dest)?;
|
||||
}
|
||||
|
||||
|
@ -2,10 +2,42 @@ use std::sync::atomic::{AtomicBool, Ordering};
|
||||
|
||||
use rustc_target::abi::Size;
|
||||
|
||||
use crate::concurrency::sync::LAZY_INIT_COOKIE;
|
||||
use crate::*;
|
||||
|
||||
// pthread_mutexattr_t is either 4 or 8 bytes, depending on the platform.
|
||||
// We ignore the platform layout and store our own fields:
|
||||
/// Do a bytewise comparison of the two places, using relaxed atomic reads. This is used to check if
|
||||
/// a synchronization primitive matches its static initializer value.
|
||||
///
|
||||
/// The reads happen in chunks of 4, so all racing accesses must also use that access size.
|
||||
fn bytewise_equal_atomic_relaxed<'tcx>(
|
||||
ecx: &MiriInterpCx<'tcx>,
|
||||
left: &MPlaceTy<'tcx>,
|
||||
right: &MPlaceTy<'tcx>,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
let size = left.layout.size;
|
||||
assert_eq!(size, right.layout.size);
|
||||
|
||||
// We do this in chunks of 4, so that we are okay to race with (sufficiently aligned)
|
||||
// 4-byte atomic accesses.
|
||||
assert!(size.bytes() % 4 == 0);
|
||||
for i in 0..(size.bytes() / 4) {
|
||||
let offset = Size::from_bytes(i.strict_mul(4));
|
||||
let load = |place: &MPlaceTy<'tcx>| {
|
||||
let byte = place.offset(offset, ecx.machine.layouts.u32, ecx)?;
|
||||
ecx.read_scalar_atomic(&byte, AtomicReadOrd::Relaxed)?.to_u32()
|
||||
};
|
||||
let left = load(left)?;
|
||||
let right = load(right)?;
|
||||
if left != right {
|
||||
return interp_ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
interp_ok(true)
|
||||
}
|
||||
|
||||
// # pthread_mutexattr_t
|
||||
// We store some data directly inside the type, ignoring the platform layout:
|
||||
// - kind: i32
|
||||
|
||||
#[inline]
|
||||
@ -49,52 +81,72 @@ fn mutexattr_set_kind<'tcx>(
|
||||
/// field *not* PTHREAD_MUTEX_DEFAULT but this special flag.
|
||||
const PTHREAD_MUTEX_KIND_UNCHANGED: i32 = 0x8000000;
|
||||
|
||||
/// Translates the mutex kind from what is stored in pthread_mutexattr_t to our enum.
|
||||
fn mutexattr_translate_kind<'tcx>(
|
||||
ecx: &MiriInterpCx<'tcx>,
|
||||
kind: i32,
|
||||
) -> InterpResult<'tcx, MutexKind> {
|
||||
interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
|
||||
MutexKind::Normal
|
||||
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
|
||||
MutexKind::ErrorCheck
|
||||
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
|
||||
MutexKind::Recursive
|
||||
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
|
||||
|| kind == PTHREAD_MUTEX_KIND_UNCHANGED
|
||||
{
|
||||
// We check this *last* since PTHREAD_MUTEX_DEFAULT may be numerically equal to one of the
|
||||
// others, and we want an explicit `mutexattr_settype` to work as expected.
|
||||
MutexKind::Default
|
||||
} else {
|
||||
throw_unsup_format!("unsupported type of mutex: {kind}");
|
||||
})
|
||||
}
|
||||
|
||||
// # pthread_mutex_t
|
||||
// We store some data directly inside the type, ignoring the platform layout:
|
||||
// - init: u32
|
||||
|
||||
/// The mutex kind.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum MutexKind {
|
||||
enum MutexKind {
|
||||
Normal,
|
||||
Default,
|
||||
Recursive,
|
||||
ErrorCheck,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Additional data that we attach with each mutex instance.
|
||||
pub struct AdditionalMutexData {
|
||||
/// The mutex kind, used by some mutex implementations like pthreads mutexes.
|
||||
pub kind: MutexKind,
|
||||
|
||||
/// The address of the mutex.
|
||||
pub address: u64,
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct PthreadMutex {
|
||||
id: MutexId,
|
||||
kind: MutexKind,
|
||||
}
|
||||
|
||||
// pthread_mutex_t is between 4 and 48 bytes, depending on the platform.
|
||||
// We ignore the platform layout and store our own fields:
|
||||
// - id: u32
|
||||
|
||||
fn mutex_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
|
||||
// When adding a new OS, make sure we also support all its static initializers in
|
||||
// `mutex_kind_from_static_initializer`!
|
||||
/// To ensure an initialized mutex that was moved somewhere else can be distinguished from
|
||||
/// a statically initialized mutex that is used the first time, we pick some offset within
|
||||
/// `pthread_mutex_t` and use it as an "initialized" flag.
|
||||
fn mutex_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
|
||||
let offset = match &*ecx.tcx.sess.target.os {
|
||||
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
|
||||
// macOS stores a signature in the first bytes, so we have to move to offset 4.
|
||||
// macOS stores a signature in the first bytes, so we move to offset 4.
|
||||
"macos" => 4,
|
||||
os => throw_unsup_format!("`pthread_mutex` is not supported on {os}"),
|
||||
};
|
||||
let offset = Size::from_bytes(offset);
|
||||
|
||||
// Sanity-check this against PTHREAD_MUTEX_INITIALIZER (but only once):
|
||||
// the id must start out as 0.
|
||||
// FIXME on some platforms (e.g linux) there are more static initializers for
|
||||
// recursive or error-checking mutexes. We should also add them in this sanity check.
|
||||
// the `init` field must start out not equal to INIT_COOKIE.
|
||||
static SANITY: AtomicBool = AtomicBool::new(false);
|
||||
if !SANITY.swap(true, Ordering::Relaxed) {
|
||||
let check_static_initializer = |name| {
|
||||
let static_initializer = ecx.eval_path(&["libc", name]);
|
||||
let id_field = static_initializer
|
||||
.offset(Size::from_bytes(offset), ecx.machine.layouts.u32, ecx)
|
||||
.unwrap();
|
||||
let id = ecx.read_scalar(&id_field).unwrap().to_u32().unwrap();
|
||||
assert_eq!(id, 0, "{name} is incompatible with our pthread_mutex layout: id is not 0");
|
||||
let init_field =
|
||||
static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
|
||||
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
|
||||
assert_ne!(
|
||||
init, LAZY_INIT_COOKIE,
|
||||
"{name} is incompatible with our initialization cookie"
|
||||
);
|
||||
};
|
||||
|
||||
check_static_initializer("PTHREAD_MUTEX_INITIALIZER");
|
||||
@ -120,42 +172,33 @@ fn mutex_create<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
mutex_ptr: &OpTy<'tcx>,
|
||||
kind: MutexKind,
|
||||
) -> InterpResult<'tcx> {
|
||||
) -> InterpResult<'tcx, PthreadMutex> {
|
||||
let mutex = ecx.deref_pointer(mutex_ptr)?;
|
||||
let address = mutex.ptr().addr().bytes();
|
||||
let data = Box::new(AdditionalMutexData { address, kind });
|
||||
ecx.mutex_create(&mutex, mutex_id_offset(ecx)?, Some(data))?;
|
||||
interp_ok(())
|
||||
let id = ecx.machine.sync.mutex_create();
|
||||
let data = PthreadMutex { id, kind };
|
||||
ecx.lazy_sync_init(&mutex, mutex_init_offset(ecx)?, data)?;
|
||||
interp_ok(data)
|
||||
}
|
||||
|
||||
/// Returns the `MutexId` of the mutex stored at `mutex_op`.
|
||||
///
|
||||
/// `mutex_get_id` will also check if the mutex has been moved since its first use and
|
||||
/// return an error if it has.
|
||||
fn mutex_get_id<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
fn mutex_get_data<'tcx, 'a>(
|
||||
ecx: &'a mut MiriInterpCx<'tcx>,
|
||||
mutex_ptr: &OpTy<'tcx>,
|
||||
) -> InterpResult<'tcx, MutexId> {
|
||||
) -> InterpResult<'tcx, PthreadMutex> {
|
||||
let mutex = ecx.deref_pointer(mutex_ptr)?;
|
||||
let address = mutex.ptr().addr().bytes();
|
||||
|
||||
let id = ecx.mutex_get_or_create_id(&mutex, mutex_id_offset(ecx)?, |ecx| {
|
||||
// This is called if a static initializer was used and the lock has not been assigned
|
||||
// an ID yet. We have to determine the mutex kind from the static initializer.
|
||||
let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
|
||||
|
||||
interp_ok(Some(Box::new(AdditionalMutexData { kind, address })))
|
||||
})?;
|
||||
|
||||
// Check that the mutex has not been moved since last use.
|
||||
let data = ecx
|
||||
.mutex_get_data::<AdditionalMutexData>(id)
|
||||
.expect("data should always exist for pthreads");
|
||||
if data.address != address {
|
||||
throw_ub_format!("pthread_mutex_t can't be moved after first use")
|
||||
}
|
||||
|
||||
interp_ok(id)
|
||||
ecx.lazy_sync_get_data(
|
||||
&mutex,
|
||||
mutex_init_offset(ecx)?,
|
||||
|| throw_ub_format!("`pthread_mutex_t` can't be moved after first use"),
|
||||
|ecx| {
|
||||
let kind = mutex_kind_from_static_initializer(ecx, &mutex)?;
|
||||
let id = ecx.machine.sync.mutex_create();
|
||||
interp_ok(PthreadMutex { id, kind })
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
/// Returns the kind of a static initializer.
|
||||
@ -163,107 +206,86 @@ fn mutex_kind_from_static_initializer<'tcx>(
|
||||
ecx: &MiriInterpCx<'tcx>,
|
||||
mutex: &MPlaceTy<'tcx>,
|
||||
) -> InterpResult<'tcx, MutexKind> {
|
||||
interp_ok(match &*ecx.tcx.sess.target.os {
|
||||
// Only linux has static initializers other than PTHREAD_MUTEX_DEFAULT.
|
||||
"linux" => {
|
||||
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
|
||||
let kind_place =
|
||||
mutex.offset(Size::from_bytes(offset), ecx.machine.layouts.i32, ecx)?;
|
||||
let kind = ecx.read_scalar(&kind_place)?.to_i32()?;
|
||||
// Here we give PTHREAD_MUTEX_DEFAULT priority so that
|
||||
// PTHREAD_MUTEX_INITIALIZER behaves like `pthread_mutex_init` with a NULL argument.
|
||||
if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT") {
|
||||
MutexKind::Default
|
||||
} else {
|
||||
mutex_translate_kind(ecx, kind)?
|
||||
}
|
||||
}
|
||||
_ => MutexKind::Default,
|
||||
})
|
||||
// All the static initializers recognized here *must* be checked in `mutex_init_offset`!
|
||||
let is_initializer =
|
||||
|name| bytewise_equal_atomic_relaxed(ecx, mutex, &ecx.eval_path(&["libc", name]));
|
||||
|
||||
// PTHREAD_MUTEX_INITIALIZER is recognized on all targets.
|
||||
if is_initializer("PTHREAD_MUTEX_INITIALIZER")? {
|
||||
return interp_ok(MutexKind::Default);
|
||||
}
|
||||
// Support additional platform-specific initializers.
|
||||
match &*ecx.tcx.sess.target.os {
|
||||
"linux" =>
|
||||
if is_initializer("PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP")? {
|
||||
return interp_ok(MutexKind::Recursive);
|
||||
} else if is_initializer("PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP")? {
|
||||
return interp_ok(MutexKind::ErrorCheck);
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
throw_unsup_format!("unsupported static initializer used for `pthread_mutex_t`");
|
||||
}
|
||||
|
||||
fn mutex_translate_kind<'tcx>(
|
||||
ecx: &MiriInterpCx<'tcx>,
|
||||
kind: i32,
|
||||
) -> InterpResult<'tcx, MutexKind> {
|
||||
interp_ok(if kind == (ecx.eval_libc_i32("PTHREAD_MUTEX_NORMAL")) {
|
||||
MutexKind::Normal
|
||||
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_ERRORCHECK") {
|
||||
MutexKind::ErrorCheck
|
||||
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_RECURSIVE") {
|
||||
MutexKind::Recursive
|
||||
} else if kind == ecx.eval_libc_i32("PTHREAD_MUTEX_DEFAULT")
|
||||
|| kind == PTHREAD_MUTEX_KIND_UNCHANGED
|
||||
{
|
||||
// We check this *last* since PTHREAD_MUTEX_DEFAULT may be numerically equal to one of the
|
||||
// others, and we want an explicit `mutexattr_settype` to work as expected.
|
||||
MutexKind::Default
|
||||
} else {
|
||||
throw_unsup_format!("unsupported type of mutex: {kind}");
|
||||
})
|
||||
// # pthread_rwlock_t
|
||||
// We store some data directly inside the type, ignoring the platform layout:
|
||||
// - init: u32
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
struct PthreadRwLock {
|
||||
id: RwLockId,
|
||||
}
|
||||
|
||||
// pthread_rwlock_t is between 4 and 56 bytes, depending on the platform.
|
||||
// We ignore the platform layout and store our own fields:
|
||||
// - id: u32
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Additional data that we attach with each rwlock instance.
|
||||
pub struct AdditionalRwLockData {
|
||||
/// The address of the rwlock.
|
||||
pub address: u64,
|
||||
}
|
||||
|
||||
fn rwlock_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
|
||||
fn rwlock_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
|
||||
let offset = match &*ecx.tcx.sess.target.os {
|
||||
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
|
||||
// macOS stores a signature in the first bytes, so we have to move to offset 4.
|
||||
// macOS stores a signature in the first bytes, so we move to offset 4.
|
||||
"macos" => 4,
|
||||
os => throw_unsup_format!("`pthread_rwlock` is not supported on {os}"),
|
||||
};
|
||||
let offset = Size::from_bytes(offset);
|
||||
|
||||
// Sanity-check this against PTHREAD_RWLOCK_INITIALIZER (but only once):
|
||||
// the id must start out as 0.
|
||||
// the `init` field must start out not equal to LAZY_INIT_COOKIE.
|
||||
static SANITY: AtomicBool = AtomicBool::new(false);
|
||||
if !SANITY.swap(true, Ordering::Relaxed) {
|
||||
let static_initializer = ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]);
|
||||
let id_field = static_initializer
|
||||
.offset(Size::from_bytes(offset), ecx.machine.layouts.u32, ecx)
|
||||
.unwrap();
|
||||
let id = ecx.read_scalar(&id_field).unwrap().to_u32().unwrap();
|
||||
assert_eq!(
|
||||
id, 0,
|
||||
"PTHREAD_RWLOCK_INITIALIZER is incompatible with our pthread_rwlock layout: id is not 0"
|
||||
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
|
||||
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
|
||||
assert_ne!(
|
||||
init, LAZY_INIT_COOKIE,
|
||||
"PTHREAD_RWLOCK_INITIALIZER is incompatible with our initialization cookie"
|
||||
);
|
||||
}
|
||||
|
||||
interp_ok(offset)
|
||||
}
|
||||
|
||||
fn rwlock_get_id<'tcx>(
|
||||
fn rwlock_get_data<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
rwlock_ptr: &OpTy<'tcx>,
|
||||
) -> InterpResult<'tcx, RwLockId> {
|
||||
) -> InterpResult<'tcx, PthreadRwLock> {
|
||||
let rwlock = ecx.deref_pointer(rwlock_ptr)?;
|
||||
let address = rwlock.ptr().addr().bytes();
|
||||
|
||||
let id = ecx.rwlock_get_or_create_id(&rwlock, rwlock_id_offset(ecx)?, |_| {
|
||||
interp_ok(Some(Box::new(AdditionalRwLockData { address })))
|
||||
})?;
|
||||
|
||||
// Check that the rwlock has not been moved since last use.
|
||||
let data = ecx
|
||||
.rwlock_get_data::<AdditionalRwLockData>(id)
|
||||
.expect("data should always exist for pthreads");
|
||||
if data.address != address {
|
||||
throw_ub_format!("pthread_rwlock_t can't be moved after first use")
|
||||
}
|
||||
|
||||
interp_ok(id)
|
||||
ecx.lazy_sync_get_data(
|
||||
&rwlock,
|
||||
rwlock_init_offset(ecx)?,
|
||||
|| throw_ub_format!("`pthread_rwlock_t` can't be moved after first use"),
|
||||
|ecx| {
|
||||
if !bytewise_equal_atomic_relaxed(
|
||||
ecx,
|
||||
&rwlock,
|
||||
&ecx.eval_path(&["libc", "PTHREAD_RWLOCK_INITIALIZER"]),
|
||||
)? {
|
||||
throw_unsup_format!("unsupported static initializer used for `pthread_rwlock_t`");
|
||||
}
|
||||
let id = ecx.machine.sync.rwlock_create();
|
||||
interp_ok(PthreadRwLock { id })
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// pthread_condattr_t.
|
||||
// We ignore the platform layout and store our own fields:
|
||||
// # pthread_condattr_t
|
||||
// We store some data directly inside the type, ignoring the platform layout:
|
||||
// - clock: i32
|
||||
|
||||
#[inline]
|
||||
@ -288,19 +310,6 @@ fn condattr_get_clock_id<'tcx>(
|
||||
.to_i32()
|
||||
}
|
||||
|
||||
fn cond_translate_clock_id<'tcx>(
|
||||
ecx: &MiriInterpCx<'tcx>,
|
||||
raw_id: i32,
|
||||
) -> InterpResult<'tcx, ClockId> {
|
||||
interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
|
||||
ClockId::Realtime
|
||||
} else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
|
||||
ClockId::Monotonic
|
||||
} else {
|
||||
throw_unsup_format!("unsupported clock id: {raw_id}");
|
||||
})
|
||||
}
|
||||
|
||||
fn condattr_set_clock_id<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
attr_ptr: &OpTy<'tcx>,
|
||||
@ -315,30 +324,43 @@ fn condattr_set_clock_id<'tcx>(
|
||||
)
|
||||
}
|
||||
|
||||
// pthread_cond_t can be only 4 bytes in size, depending on the platform.
|
||||
// We ignore the platform layout and store our own fields:
|
||||
// - id: u32
|
||||
/// Translates the clock from what is stored in pthread_condattr_t to our enum.
|
||||
fn condattr_translate_clock_id<'tcx>(
|
||||
ecx: &MiriInterpCx<'tcx>,
|
||||
raw_id: i32,
|
||||
) -> InterpResult<'tcx, ClockId> {
|
||||
interp_ok(if raw_id == ecx.eval_libc_i32("CLOCK_REALTIME") {
|
||||
ClockId::Realtime
|
||||
} else if raw_id == ecx.eval_libc_i32("CLOCK_MONOTONIC") {
|
||||
ClockId::Monotonic
|
||||
} else {
|
||||
throw_unsup_format!("unsupported clock id: {raw_id}");
|
||||
})
|
||||
}
|
||||
|
||||
fn cond_id_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, u64> {
|
||||
// # pthread_cond_t
|
||||
// We store some data directly inside the type, ignoring the platform layout:
|
||||
// - init: u32
|
||||
|
||||
fn cond_init_offset<'tcx>(ecx: &MiriInterpCx<'tcx>) -> InterpResult<'tcx, Size> {
|
||||
let offset = match &*ecx.tcx.sess.target.os {
|
||||
"linux" | "illumos" | "solaris" | "freebsd" | "android" => 0,
|
||||
// macOS stores a signature in the first bytes, so we have to move to offset 4.
|
||||
// macOS stores a signature in the first bytes, so we move to offset 4.
|
||||
"macos" => 4,
|
||||
os => throw_unsup_format!("`pthread_cond` is not supported on {os}"),
|
||||
};
|
||||
let offset = Size::from_bytes(offset);
|
||||
|
||||
// Sanity-check this against PTHREAD_COND_INITIALIZER (but only once):
|
||||
// the id must start out as 0.
|
||||
// the `init` field must start out not equal to LAZY_INIT_COOKIE.
|
||||
static SANITY: AtomicBool = AtomicBool::new(false);
|
||||
if !SANITY.swap(true, Ordering::Relaxed) {
|
||||
let static_initializer = ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]);
|
||||
let id_field = static_initializer
|
||||
.offset(Size::from_bytes(offset), ecx.machine.layouts.u32, ecx)
|
||||
.unwrap();
|
||||
let id = ecx.read_scalar(&id_field).unwrap().to_u32().unwrap();
|
||||
assert_eq!(
|
||||
id, 0,
|
||||
"PTHREAD_COND_INITIALIZER is incompatible with our pthread_cond layout: id is not 0"
|
||||
let init_field = static_initializer.offset(offset, ecx.machine.layouts.u32, ecx).unwrap();
|
||||
let init = ecx.read_scalar(&init_field).unwrap().to_u32().unwrap();
|
||||
assert_ne!(
|
||||
init, LAZY_INIT_COOKIE,
|
||||
"PTHREAD_COND_INITIALIZER is incompatible with our initialization cookie"
|
||||
);
|
||||
}
|
||||
|
||||
@ -351,36 +373,46 @@ enum ClockId {
|
||||
Monotonic,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Additional data that we attach with each cond instance.
|
||||
struct AdditionalCondData {
|
||||
/// The address of the cond.
|
||||
address: u64,
|
||||
|
||||
/// The clock id of the cond.
|
||||
clock_id: ClockId,
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
struct PthreadCondvar {
|
||||
id: CondvarId,
|
||||
clock: ClockId,
|
||||
}
|
||||
|
||||
fn cond_get_id<'tcx>(
|
||||
fn cond_create<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
cond_ptr: &OpTy<'tcx>,
|
||||
) -> InterpResult<'tcx, CondvarId> {
|
||||
clock: ClockId,
|
||||
) -> InterpResult<'tcx, PthreadCondvar> {
|
||||
let cond = ecx.deref_pointer(cond_ptr)?;
|
||||
let address = cond.ptr().addr().bytes();
|
||||
let id = ecx.condvar_get_or_create_id(&cond, cond_id_offset(ecx)?, |_ecx| {
|
||||
// This used the static initializer. The clock there is always CLOCK_REALTIME.
|
||||
interp_ok(Some(Box::new(AdditionalCondData { address, clock_id: ClockId::Realtime })))
|
||||
})?;
|
||||
let id = ecx.machine.sync.condvar_create();
|
||||
let data = PthreadCondvar { id, clock };
|
||||
ecx.lazy_sync_init(&cond, cond_init_offset(ecx)?, data)?;
|
||||
interp_ok(data)
|
||||
}
|
||||
|
||||
// Check that the mutex has not been moved since last use.
|
||||
let data = ecx
|
||||
.condvar_get_data::<AdditionalCondData>(id)
|
||||
.expect("data should always exist for pthreads");
|
||||
if data.address != address {
|
||||
throw_ub_format!("pthread_cond_t can't be moved after first use")
|
||||
}
|
||||
|
||||
interp_ok(id)
|
||||
fn cond_get_data<'tcx>(
|
||||
ecx: &mut MiriInterpCx<'tcx>,
|
||||
cond_ptr: &OpTy<'tcx>,
|
||||
) -> InterpResult<'tcx, PthreadCondvar> {
|
||||
let cond = ecx.deref_pointer(cond_ptr)?;
|
||||
ecx.lazy_sync_get_data(
|
||||
&cond,
|
||||
cond_init_offset(ecx)?,
|
||||
|| throw_ub_format!("`pthread_cond_t` can't be moved after first use"),
|
||||
|ecx| {
|
||||
if !bytewise_equal_atomic_relaxed(
|
||||
ecx,
|
||||
&cond,
|
||||
&ecx.eval_path(&["libc", "PTHREAD_COND_INITIALIZER"]),
|
||||
)? {
|
||||
throw_unsup_format!("unsupported static initializer used for `pthread_cond_t`");
|
||||
}
|
||||
// This used the static initializer. The clock there is always CLOCK_REALTIME.
|
||||
let id = ecx.machine.sync.condvar_create();
|
||||
interp_ok(PthreadCondvar { id, clock: ClockId::Realtime })
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
impl<'tcx> EvalContextExt<'tcx> for crate::MiriInterpCx<'tcx> {}
|
||||
@ -453,7 +485,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
let kind = if this.ptr_is_null(attr)? {
|
||||
MutexKind::Default
|
||||
} else {
|
||||
mutex_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
|
||||
mutexattr_translate_kind(this, mutexattr_get_kind(this, attr_op)?)?
|
||||
};
|
||||
|
||||
mutex_create(this, mutex_op, kind)?;
|
||||
@ -468,20 +500,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = mutex_get_id(this, mutex_op)?;
|
||||
let kind = this
|
||||
.mutex_get_data::<AdditionalMutexData>(id)
|
||||
.expect("data should always exist for pthread mutexes")
|
||||
.kind;
|
||||
let mutex = mutex_get_data(this, mutex_op)?;
|
||||
|
||||
let ret = if this.mutex_is_locked(id) {
|
||||
let owner_thread = this.mutex_get_owner(id);
|
||||
let ret = if this.mutex_is_locked(mutex.id) {
|
||||
let owner_thread = this.mutex_get_owner(mutex.id);
|
||||
if owner_thread != this.active_thread() {
|
||||
this.mutex_enqueue_and_block(id, Some((Scalar::from_i32(0), dest.clone())));
|
||||
this.mutex_enqueue_and_block(mutex.id, Some((Scalar::from_i32(0), dest.clone())));
|
||||
return interp_ok(());
|
||||
} else {
|
||||
// Trying to acquire the same mutex again.
|
||||
match kind {
|
||||
match mutex.kind {
|
||||
MutexKind::Default =>
|
||||
throw_ub_format!(
|
||||
"trying to acquire default mutex already locked by the current thread"
|
||||
@ -489,14 +517,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
MutexKind::Normal => throw_machine_stop!(TerminationInfo::Deadlock),
|
||||
MutexKind::ErrorCheck => this.eval_libc_i32("EDEADLK"),
|
||||
MutexKind::Recursive => {
|
||||
this.mutex_lock(id);
|
||||
this.mutex_lock(mutex.id);
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The mutex is unlocked. Let's lock it.
|
||||
this.mutex_lock(id);
|
||||
this.mutex_lock(mutex.id);
|
||||
0
|
||||
};
|
||||
this.write_scalar(Scalar::from_i32(ret), dest)?;
|
||||
@ -506,29 +534,25 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn pthread_mutex_trylock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = mutex_get_id(this, mutex_op)?;
|
||||
let kind = this
|
||||
.mutex_get_data::<AdditionalMutexData>(id)
|
||||
.expect("data should always exist for pthread mutexes")
|
||||
.kind;
|
||||
let mutex = mutex_get_data(this, mutex_op)?;
|
||||
|
||||
interp_ok(Scalar::from_i32(if this.mutex_is_locked(id) {
|
||||
let owner_thread = this.mutex_get_owner(id);
|
||||
interp_ok(Scalar::from_i32(if this.mutex_is_locked(mutex.id) {
|
||||
let owner_thread = this.mutex_get_owner(mutex.id);
|
||||
if owner_thread != this.active_thread() {
|
||||
this.eval_libc_i32("EBUSY")
|
||||
} else {
|
||||
match kind {
|
||||
match mutex.kind {
|
||||
MutexKind::Default | MutexKind::Normal | MutexKind::ErrorCheck =>
|
||||
this.eval_libc_i32("EBUSY"),
|
||||
MutexKind::Recursive => {
|
||||
this.mutex_lock(id);
|
||||
this.mutex_lock(mutex.id);
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// The mutex is unlocked. Let's lock it.
|
||||
this.mutex_lock(id);
|
||||
this.mutex_lock(mutex.id);
|
||||
0
|
||||
}))
|
||||
}
|
||||
@ -536,20 +560,16 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn pthread_mutex_unlock(&mut self, mutex_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = mutex_get_id(this, mutex_op)?;
|
||||
let kind = this
|
||||
.mutex_get_data::<AdditionalMutexData>(id)
|
||||
.expect("data should always exist for pthread mutexes")
|
||||
.kind;
|
||||
let mutex = mutex_get_data(this, mutex_op)?;
|
||||
|
||||
if let Some(_old_locked_count) = this.mutex_unlock(id)? {
|
||||
if let Some(_old_locked_count) = this.mutex_unlock(mutex.id)? {
|
||||
// The mutex was locked by the current thread.
|
||||
interp_ok(Scalar::from_i32(0))
|
||||
} else {
|
||||
// The mutex was locked by another thread or not locked at all. See
|
||||
// the “Unlock When Not Owner” column in
|
||||
// https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_mutex_unlock.html.
|
||||
match kind {
|
||||
match mutex.kind {
|
||||
MutexKind::Default =>
|
||||
throw_ub_format!(
|
||||
"unlocked a default mutex that was not locked by the current thread"
|
||||
@ -569,9 +589,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Reading the field also has the side-effect that we detect double-`destroy`
|
||||
// since we make the field uninit below.
|
||||
let id = mutex_get_id(this, mutex_op)?;
|
||||
let mutex = mutex_get_data(this, mutex_op)?;
|
||||
|
||||
if this.mutex_is_locked(id) {
|
||||
if this.mutex_is_locked(mutex.id) {
|
||||
throw_ub_format!("destroyed a locked mutex");
|
||||
}
|
||||
|
||||
@ -591,7 +611,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = rwlock_get_id(this, rwlock_op)?;
|
||||
let id = rwlock_get_data(this, rwlock_op)?.id;
|
||||
|
||||
if this.rwlock_is_write_locked(id) {
|
||||
this.rwlock_enqueue_and_block_reader(id, Scalar::from_i32(0), dest.clone());
|
||||
@ -606,7 +626,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn pthread_rwlock_tryrdlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = rwlock_get_id(this, rwlock_op)?;
|
||||
let id = rwlock_get_data(this, rwlock_op)?.id;
|
||||
|
||||
if this.rwlock_is_write_locked(id) {
|
||||
interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
|
||||
@ -623,7 +643,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = rwlock_get_id(this, rwlock_op)?;
|
||||
let id = rwlock_get_data(this, rwlock_op)?.id;
|
||||
|
||||
if this.rwlock_is_locked(id) {
|
||||
// Note: this will deadlock if the lock is already locked by this
|
||||
@ -650,7 +670,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn pthread_rwlock_trywrlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = rwlock_get_id(this, rwlock_op)?;
|
||||
let id = rwlock_get_data(this, rwlock_op)?.id;
|
||||
|
||||
if this.rwlock_is_locked(id) {
|
||||
interp_ok(Scalar::from_i32(this.eval_libc_i32("EBUSY")))
|
||||
@ -663,7 +683,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
fn pthread_rwlock_unlock(&mut self, rwlock_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = rwlock_get_id(this, rwlock_op)?;
|
||||
let id = rwlock_get_data(this, rwlock_op)?.id;
|
||||
|
||||
#[allow(clippy::if_same_then_else)]
|
||||
if this.rwlock_reader_unlock(id)? || this.rwlock_writer_unlock(id)? {
|
||||
@ -678,7 +698,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Reading the field also has the side-effect that we detect double-`destroy`
|
||||
// since we make the field unint below.
|
||||
let id = rwlock_get_id(this, rwlock_op)?;
|
||||
let id = rwlock_get_data(this, rwlock_op)?.id;
|
||||
|
||||
if this.rwlock_is_locked(id) {
|
||||
throw_ub_format!("destroyed a locked rwlock");
|
||||
@ -773,29 +793,23 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
} else {
|
||||
condattr_get_clock_id(this, attr_op)?
|
||||
};
|
||||
let clock_id = cond_translate_clock_id(this, clock_id)?;
|
||||
let clock_id = condattr_translate_clock_id(this, clock_id)?;
|
||||
|
||||
let cond = this.deref_pointer(cond_op)?;
|
||||
let address = cond.ptr().addr().bytes();
|
||||
this.condvar_create(
|
||||
&cond,
|
||||
cond_id_offset(this)?,
|
||||
Some(Box::new(AdditionalCondData { address, clock_id })),
|
||||
)?;
|
||||
cond_create(this, cond_op, clock_id)?;
|
||||
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn pthread_cond_signal(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
|
||||
let this = self.eval_context_mut();
|
||||
let id = cond_get_id(this, cond_op)?;
|
||||
let id = cond_get_data(this, cond_op)?.id;
|
||||
this.condvar_signal(id)?;
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn pthread_cond_broadcast(&mut self, cond_op: &OpTy<'tcx>) -> InterpResult<'tcx, ()> {
|
||||
let this = self.eval_context_mut();
|
||||
let id = cond_get_id(this, cond_op)?;
|
||||
let id = cond_get_data(this, cond_op)?.id;
|
||||
while this.condvar_signal(id)? {}
|
||||
interp_ok(())
|
||||
}
|
||||
@ -808,11 +822,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = cond_get_id(this, cond_op)?;
|
||||
let mutex_id = mutex_get_id(this, mutex_op)?;
|
||||
let data = cond_get_data(this, cond_op)?;
|
||||
let mutex_id = mutex_get_data(this, mutex_op)?.id;
|
||||
|
||||
this.condvar_wait(
|
||||
id,
|
||||
data.id,
|
||||
mutex_id,
|
||||
None, // no timeout
|
||||
Scalar::from_i32(0),
|
||||
@ -832,14 +846,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
) -> InterpResult<'tcx> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let id = cond_get_id(this, cond_op)?;
|
||||
let mutex_id = mutex_get_id(this, mutex_op)?;
|
||||
let data = cond_get_data(this, cond_op)?;
|
||||
let mutex_id = mutex_get_data(this, mutex_op)?.id;
|
||||
|
||||
// Extract the timeout.
|
||||
let clock_id = this
|
||||
.condvar_get_data::<AdditionalCondData>(id)
|
||||
.expect("additional data should always be present for pthreads")
|
||||
.clock_id;
|
||||
let duration = match this
|
||||
.read_timespec(&this.deref_pointer_as(abstime_op, this.libc_ty_layout("timespec"))?)?
|
||||
{
|
||||
@ -850,7 +860,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
return interp_ok(());
|
||||
}
|
||||
};
|
||||
let timeout_clock = match clock_id {
|
||||
let timeout_clock = match data.clock {
|
||||
ClockId::Realtime => {
|
||||
this.check_no_isolation("`pthread_cond_timedwait` with `CLOCK_REALTIME`")?;
|
||||
TimeoutClock::RealTime
|
||||
@ -859,7 +869,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
};
|
||||
|
||||
this.condvar_wait(
|
||||
id,
|
||||
data.id,
|
||||
mutex_id,
|
||||
Some((timeout_clock, TimeoutAnchor::Absolute, duration)),
|
||||
Scalar::from_i32(0),
|
||||
@ -875,7 +885,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// Reading the field also has the side-effect that we detect double-`destroy`
|
||||
// since we make the field uninit below.
|
||||
let id = cond_get_id(this, cond_op)?;
|
||||
let id = cond_get_data(this, cond_op)?.id;
|
||||
if this.condvar_is_awaited(id) {
|
||||
throw_ub_format!("destroying an awaited conditional variable");
|
||||
}
|
||||
|
@ -63,38 +63,41 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
interp_ok(Scalar::from_uint(thread_id.to_u32(), this.libc_ty_layout("pthread_t").size))
|
||||
}
|
||||
|
||||
/// Set the name of the current thread. `max_name_len` is the maximal length of the name
|
||||
/// including the null terminator.
|
||||
/// Set the name of the specified thread. If the name including the null terminator
|
||||
/// is longer than `name_max_len`, then `false` is returned.
|
||||
fn pthread_setname_np(
|
||||
&mut self,
|
||||
thread: Scalar,
|
||||
name: Scalar,
|
||||
max_name_len: usize,
|
||||
) -> InterpResult<'tcx, Scalar> {
|
||||
name_max_len: usize,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let thread = thread.to_int(this.libc_ty_layout("pthread_t").size)?;
|
||||
let thread = ThreadId::try_from(thread).unwrap();
|
||||
let name = name.to_pointer(this)?;
|
||||
|
||||
let name = this.read_c_str(name)?.to_owned();
|
||||
|
||||
// Comparing with `>=` to account for null terminator.
|
||||
if name.len() >= max_name_len {
|
||||
return interp_ok(this.eval_libc("ERANGE"));
|
||||
if name.len() >= name_max_len {
|
||||
return interp_ok(false);
|
||||
}
|
||||
|
||||
this.set_thread_name(thread, name);
|
||||
|
||||
interp_ok(Scalar::from_u32(0))
|
||||
interp_ok(true)
|
||||
}
|
||||
|
||||
/// Get the name of the specified thread. If the thread name doesn't fit
|
||||
/// the buffer, then if `truncate` is set the truncated name is written out,
|
||||
/// otherwise `false` is returned.
|
||||
fn pthread_getname_np(
|
||||
&mut self,
|
||||
thread: Scalar,
|
||||
name_out: Scalar,
|
||||
len: Scalar,
|
||||
) -> InterpResult<'tcx, Scalar> {
|
||||
truncate: bool,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
let this = self.eval_context_mut();
|
||||
|
||||
let thread = thread.to_int(this.libc_ty_layout("pthread_t").size)?;
|
||||
@ -104,9 +107,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
|
||||
|
||||
// FIXME: we should use the program name if the thread name is not set
|
||||
let name = this.get_thread_name(thread).unwrap_or(b"<unnamed>").to_owned();
|
||||
let (success, _written) = this.write_c_str(&name, name_out, len)?;
|
||||
let name = match truncate {
|
||||
true => &name[..name.len().min(len.try_into().unwrap_or(usize::MAX).saturating_sub(1))],
|
||||
false => &name,
|
||||
};
|
||||
|
||||
interp_ok(if success { Scalar::from_u32(0) } else { this.eval_libc("ERANGE") })
|
||||
let (success, _written) = this.write_c_str(name, name_out, len)?;
|
||||
|
||||
interp_ok(success)
|
||||
}
|
||||
|
||||
fn sched_yield(&mut self) -> InterpResult<'tcx, ()> {
|
||||
|
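The reworked `pthread_setname_np` shim above now just reports "the name does not fit" as a plain `false` and leaves the choice of errno (`ERANGE`, `ENAMETOOLONG`, ...) to the per-OS caller. As a standalone, hedged sketch (not Miri code) of the length rule it encodes:

```rust
// A name only fits if its byte length plus the NUL terminator is within the limit,
// hence the `>=` comparison in the shim above.
fn name_fits(name_without_nul: &[u8], name_max_len: usize) -> bool {
    name_without_nul.len() < name_max_len
}

fn main() {
    // Linux-style limit of 16 bytes including the terminator (assumed here).
    assert!(name_fits(b"test_named", 16)); // 10 bytes + NUL fits
    assert!(!name_fits(b"sixteen__bytes!!", 16)); // 16 bytes + NUL does not
}
```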
@@ -163,7 +163,7 @@ impl FileDescription for AnonSocket {
} else {
// Blocking socketpair with writer and empty buffer.
// FIXME: blocking is currently not supported
throw_unsup_format!("socketpair read: blocking isn't supported yet");
throw_unsup_format!("socketpair/pipe/pipe2 read: blocking isn't supported yet");
}
}
}
@@ -230,13 +230,13 @@ impl FileDescription for AnonSocket {
return ecx.set_last_error_and_return(ErrorKind::WouldBlock, dest);
} else {
// Blocking socketpair with a full buffer.
throw_unsup_format!("socketpair write: blocking isn't supported yet");
throw_unsup_format!("socketpair/pipe/pipe2 write: blocking isn't supported yet");
}
}
// Remember this clock so `read` can synchronize with us.
if let Some(clock) = &ecx.release_clock() {
ecx.release_clock(|clock| {
writebuf.clock.join(clock);
}
});
// Do full write / partial write based on the space available.
let actual_write_size = len.min(available_space);
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
@@ -267,21 +267,24 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();

let domain = this.read_scalar(domain)?.to_i32()?;
let mut type_ = this.read_scalar(type_)?.to_i32()?;
let mut flags = this.read_scalar(type_)?.to_i32()?;
let protocol = this.read_scalar(protocol)?.to_i32()?;
let sv = this.deref_pointer(sv)?;

let mut is_sock_nonblock = false;

// Parse and remove the type flags that we support.
// SOCK_NONBLOCK only exists on Linux.
// Interpret the flag. Every flag we recognize is "subtracted" from `flags`, so
// if there is anything left at the end, that's an unsupported flag.
if this.tcx.sess.target.os == "linux" {
if type_ & this.eval_libc_i32("SOCK_NONBLOCK") == this.eval_libc_i32("SOCK_NONBLOCK") {
// SOCK_NONBLOCK only exists on Linux.
let sock_nonblock = this.eval_libc_i32("SOCK_NONBLOCK");
let sock_cloexec = this.eval_libc_i32("SOCK_CLOEXEC");
if flags & sock_nonblock == sock_nonblock {
is_sock_nonblock = true;
type_ &= !(this.eval_libc_i32("SOCK_NONBLOCK"));
flags &= !sock_nonblock;
}
if type_ & this.eval_libc_i32("SOCK_CLOEXEC") == this.eval_libc_i32("SOCK_CLOEXEC") {
type_ &= !(this.eval_libc_i32("SOCK_CLOEXEC"));
if flags & sock_cloexec == sock_cloexec {
flags &= !sock_cloexec;
}
}

@@ -294,11 +297,11 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
and AF_LOCAL are allowed",
domain
);
} else if type_ != this.eval_libc_i32("SOCK_STREAM") {
} else if flags != this.eval_libc_i32("SOCK_STREAM") {
throw_unsup_format!(
"socketpair: type {:#x} is unsupported, only SOCK_STREAM, \
SOCK_CLOEXEC and SOCK_NONBLOCK are allowed",
type_
flags
);
} else if protocol != 0 {
throw_unsup_format!(
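The socketpair hunk above switches to a "subtract every recognized flag" style of parsing. A minimal standalone sketch of that pattern, using illustrative constants rather than the target's real libc values:

```rust
// Placeholder values for illustration only; the shim reads the real ones via eval_libc_i32.
const SOCK_NONBLOCK: i32 = 0o4000;
const SOCK_CLOEXEC: i32 = 0o2000000;

/// Returns `(is_nonblock, remaining_flags)`. The caller then checks that the
/// remaining bits are exactly the base type it supports (e.g. SOCK_STREAM);
/// anything else left over is an unsupported flag.
fn parse_type_flags(mut flags: i32) -> (bool, i32) {
    let mut is_nonblock = false;
    if flags & SOCK_NONBLOCK == SOCK_NONBLOCK {
        is_nonblock = true;
        flags &= !SOCK_NONBLOCK;
    }
    if flags & SOCK_CLOEXEC == SOCK_CLOEXEC {
        // CLOEXEC is accepted but otherwise ignored in this sketch.
        flags &= !SOCK_CLOEXEC;
    }
    (is_nonblock, flags)
}

fn main() {
    const SOCK_STREAM: i32 = 1; // illustrative
    let (nonblock, rest) = parse_type_flags(SOCK_STREAM | SOCK_NONBLOCK);
    assert!(nonblock);
    assert_eq!(rest, SOCK_STREAM);
}
```

The `pipe2` hunk below applies the same pattern to `O_NONBLOCK` and `O_CLOEXEC`.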
@@ -347,14 +350,26 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let this = self.eval_context_mut();

let pipefd = this.deref_pointer_as(pipefd, this.machine.layouts.i32)?;
let flags = match flags {
let mut flags = match flags {
Some(flags) => this.read_scalar(flags)?.to_i32()?,
None => 0,
};

// As usual we ignore CLOEXEC.
let cloexec = this.eval_libc_i32("O_CLOEXEC");
if flags != 0 && flags != cloexec {
let o_nonblock = this.eval_libc_i32("O_NONBLOCK");

// Interpret the flag. Every flag we recognize is "subtracted" from `flags`, so
// if there is anything left at the end, that's an unsupported flag.
let mut is_nonblock = false;
if flags & o_nonblock == o_nonblock {
is_nonblock = true;
flags &= !o_nonblock;
}
// As usual we ignore CLOEXEC.
if flags & cloexec == cloexec {
flags &= !cloexec;
}
if flags != 0 {
throw_unsup_format!("unsupported flags in `pipe2`");
}

@@ -365,13 +380,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
readbuf: Some(RefCell::new(Buffer::new())),
peer_fd: OnceCell::new(),
peer_lost_data: Cell::new(false),
is_nonblock: false,
is_nonblock,
});
let fd1 = fds.new_ref(AnonSocket {
readbuf: None,
peer_fd: OnceCell::new(),
peer_lost_data: Cell::new(false),
is_nonblock: false,
is_nonblock,
});

// Make the file descriptions point to each other.
@@ -5,15 +5,35 @@ use rustc_target::abi::Size;
use crate::concurrency::init_once::InitOnceStatus;
use crate::*;

#[derive(Copy, Clone)]
struct WindowsInitOnce {
id: InitOnceId,
}

impl<'tcx> EvalContextExtPriv<'tcx> for crate::MiriInterpCx<'tcx> {}
trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> {
// Windows sync primitives are pointer sized.
// We only use the first 4 bytes for the id.

fn init_once_get_id(&mut self, init_once_ptr: &OpTy<'tcx>) -> InterpResult<'tcx, InitOnceId> {
fn init_once_get_data(
&mut self,
init_once_ptr: &OpTy<'tcx>,
) -> InterpResult<'tcx, WindowsInitOnce> {
let this = self.eval_context_mut();

let init_once = this.deref_pointer(init_once_ptr)?;
this.init_once_get_or_create_id(&init_once, 0)
let init_offset = Size::ZERO;

this.lazy_sync_get_data(
&init_once,
init_offset,
|| throw_ub_format!("`INIT_ONCE` can't be moved after first use"),
|this| {
// TODO: check that this is still all-zero.
let id = this.machine.sync.init_once_create();
interp_ok(WindowsInitOnce { id })
},
)
}

/// Returns `true` if we were succssful, `false` if we would block.
@@ -55,7 +75,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();

let id = this.init_once_get_id(init_once_op)?;
let id = this.init_once_get_data(init_once_op)?.id;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let pending_place = this.deref_pointer(pending_op)?;
let context = this.read_pointer(context_op)?;
@@ -101,7 +121,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
) -> InterpResult<'tcx, Scalar> {
let this = self.eval_context_mut();

let id = this.init_once_get_id(init_once_op)?;
let id = this.init_once_get_data(init_once_op)?.id;
let flags = this.read_scalar(flags_op)?.to_u32()?;
let context = this.read_pointer(context_op)?;

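The new `init_once_get_data` above goes through `lazy_sync_get_data`, which is what turns a moved `INIT_ONCE` into the "can't be moved after first use" error seen in the tests below. A hedged sketch of the general idea (not Miri's actual data structures): the primitive's own bytes get a marker on first use, while the real metadata lives in a side table keyed by the current address; a marker at an address the table has never seen means the bytes were copied elsewhere.

```rust
use std::collections::HashMap;

const COOKIE: u32 = 0x4D49_5249; // purely illustrative "initialized" marker

struct LazySync {
    next_id: u32,
    by_addr: HashMap<usize, u32>, // address of the primitive -> metadata id
}

impl LazySync {
    fn get_or_create(&mut self, primitive: &mut u32) -> Result<u32, &'static str> {
        let addr = primitive as *mut u32 as usize;
        if let Some(&id) = self.by_addr.get(&addr) {
            return Ok(id); // already initialized at this location
        }
        if *primitive == COOKIE {
            // Initialized-looking bytes at an unknown location: it was moved.
            return Err("INIT_ONCE can't be moved after first use");
        }
        *primitive = COOKIE;
        let id = self.next_id;
        self.next_id += 1;
        self.by_addr.insert(addr, id);
        Ok(id)
    }
}

fn main() {
    let mut sync = LazySync { next_id: 0, by_addr: HashMap::new() };
    let mut a = 0u32;
    assert_eq!(sync.get_or_create(&mut a), Ok(0));
    let mut b = a; // "move" the primitive by copying its bytes
    assert!(sync.get_or_create(&mut b).is_err());
    assert_eq!(a, COOKIE); // the original location still carries the marker
}
```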
@@ -0,0 +1,13 @@
//@only-target: darwin

use std::cell::UnsafeCell;

fn main() {
let lock = UnsafeCell::new(libc::OS_UNFAIR_LOCK_INIT);

unsafe { libc::os_unfair_lock_lock(lock.get()) };
let lock = lock;
// This needs to either error or deadlock.
unsafe { libc::os_unfair_lock_lock(lock.get()) };
//~^ error: deadlock
}
@@ -0,0 +1,13 @@
error: deadlock: the evaluated program deadlocked
--> tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC
|
LL | unsafe { libc::os_unfair_lock_lock(lock.get()) };
| ^ the evaluated program deadlocked
|
= note: BACKTRACE:
= note: inside `main` at tests/fail-dep/concurrency/apple_os_unfair_lock_move_deadlock.rs:LL:CC

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

error: aborting due to 1 previous error
@@ -1,8 +1,8 @@
error: Undefined Behavior: pthread_cond_t can't be moved after first use
error: Undefined Behavior: `pthread_cond_t` can't be moved after first use
--> tests/fail-dep/concurrency/libc_pthread_cond_move.rs:LL:CC
|
LL | libc::pthread_cond_destroy(cond2.as_mut_ptr());
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_cond_t can't be moved after first use
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `pthread_cond_t` can't be moved after first use
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
@@ -18,7 +18,7 @@ fn check() {
// move pthread_cond_t
let mut cond2 = cond;

libc::pthread_cond_destroy(cond2.as_mut_ptr()); //~[init] ERROR: pthread_cond_t can't be moved after first use
libc::pthread_cond_destroy(cond2.as_mut_ptr()); //~[init] ERROR: can't be moved after first use
}
}

@@ -32,6 +32,6 @@ fn check() {
// move pthread_cond_t
let mut cond2 = cond;

libc::pthread_cond_destroy(&mut cond2 as *mut _); //~[static_initializer] ERROR: pthread_cond_t can't be moved after first use
libc::pthread_cond_destroy(&mut cond2 as *mut _); //~[static_initializer] ERROR: can't be moved after first use
}
}

@@ -1,8 +1,8 @@
error: Undefined Behavior: pthread_cond_t can't be moved after first use
error: Undefined Behavior: `pthread_cond_t` can't be moved after first use
--> tests/fail-dep/concurrency/libc_pthread_cond_move.rs:LL:CC
|
LL | libc::pthread_cond_destroy(&mut cond2 as *mut _);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_cond_t can't be moved after first use
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `pthread_cond_t` can't be moved after first use
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
@@ -12,8 +12,9 @@ extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(
libc::pthread_create(&mut native, ptr::null(), thread_start, ptr::null_mut()),
0
);
}
}
@@ -12,12 +12,13 @@ extern "C" fn thread_start() -> *mut libc::c_void {
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
let thread_start: extern "C" fn() -> *mut libc::c_void = thread_start;
let thread_start: extern "C" fn(*mut libc::c_void) -> *mut libc::c_void =
mem::transmute(thread_start);
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(
libc::pthread_create(&mut native, ptr::null(), thread_start, ptr::null_mut()),
0
);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0);
}
}
@@ -12,12 +12,13 @@ extern "C" fn thread_start(_null: *mut libc::c_void, _x: i32) -> *mut libc::c_vo
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
let thread_start: extern "C" fn(*mut libc::c_void, i32) -> *mut libc::c_void = thread_start;
let thread_start: extern "C" fn(*mut libc::c_void) -> *mut libc::c_void =
mem::transmute(thread_start);
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(
libc::pthread_create(&mut native, ptr::null(), thread_start, ptr::null_mut()),
0
);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0);
}
}
@@ -11,9 +11,10 @@ extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(
libc::pthread_create(&mut native, ptr::null(), thread_start, ptr::null_mut()),
0
);
assert_eq!(libc::pthread_detach(native), 0);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join a detached thread
}
@@ -11,9 +11,10 @@ extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(
libc::pthread_create(&mut native, ptr::null(), thread_start, ptr::null_mut()),
0
);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0);
assert_eq!(libc::pthread_join(native, ptr::null_mut()), 0); //~ ERROR: Undefined Behavior: trying to join an already joined thread
}
@@ -14,9 +14,10 @@ extern "C" fn thread_start(_null: *mut libc::c_void) -> *mut libc::c_void {
fn main() {
unsafe {
let mut native: libc::pthread_t = mem::zeroed();
let attr: libc::pthread_attr_t = mem::zeroed();
// assert_eq!(libc::pthread_attr_init(&mut attr), 0); FIXME: this function is not yet implemented.
assert_eq!(libc::pthread_create(&mut native, &attr, thread_start, ptr::null_mut()), 0);
assert_eq!(
libc::pthread_create(&mut native, ptr::null(), thread_start, ptr::null_mut()),
0
);
let mut native_copy: libc::pthread_t = mem::zeroed();
ptr::copy_nonoverlapping(&native, &mut native_copy, 1);
let handle = thread::spawn(move || {
@@ -1,8 +1,8 @@
error: Undefined Behavior: pthread_mutex_t can't be moved after first use
error: Undefined Behavior: `pthread_mutex_t` can't be moved after first use
--> tests/fail-dep/concurrency/libc_pthread_mutex_move.rs:LL:CC
|
LL | libc::pthread_mutex_lock(&mut m2 as *mut _);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_mutex_t can't be moved after first use
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `pthread_mutex_t` can't be moved after first use
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
@@ -12,7 +12,7 @@ fn check() {
assert_eq!(libc::pthread_mutex_init(&mut m as *mut _, std::ptr::null()), 0);

let mut m2 = m; // move the mutex
libc::pthread_mutex_lock(&mut m2 as *mut _); //~[init] ERROR: pthread_mutex_t can't be moved after first use
libc::pthread_mutex_lock(&mut m2 as *mut _); //~[init] ERROR: can't be moved after first use
}
}

@@ -23,6 +23,6 @@ fn check() {
libc::pthread_mutex_lock(&mut m as *mut _);

let mut m2 = m; // move the mutex
libc::pthread_mutex_unlock(&mut m2 as *mut _); //~[static_initializer] ERROR: pthread_mutex_t can't be moved after first use
libc::pthread_mutex_unlock(&mut m2 as *mut _); //~[static_initializer] ERROR: can't be moved after first use
}
}
@@ -1,8 +1,8 @@
error: Undefined Behavior: pthread_mutex_t can't be moved after first use
error: Undefined Behavior: `pthread_mutex_t` can't be moved after first use
--> tests/fail-dep/concurrency/libc_pthread_mutex_move.rs:LL:CC
|
LL | libc::pthread_mutex_unlock(&mut m2 as *mut _);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_mutex_t can't be moved after first use
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `pthread_mutex_t` can't be moved after first use
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
@@ -9,6 +9,6 @@ fn main() {
// Move rwlock
let mut rw2 = rw;

libc::pthread_rwlock_unlock(&mut rw2 as *mut _); //~ ERROR: pthread_rwlock_t can't be moved after first use
libc::pthread_rwlock_unlock(&mut rw2 as *mut _); //~ ERROR: can't be moved after first use
}
}
@@ -1,8 +1,8 @@
error: Undefined Behavior: pthread_rwlock_t can't be moved after first use
error: Undefined Behavior: `pthread_rwlock_t` can't be moved after first use
--> tests/fail-dep/concurrency/libc_pthread_rwlock_moved.rs:LL:CC
|
LL | libc::pthread_rwlock_unlock(&mut rw2 as *mut _);
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ pthread_rwlock_t can't be moved after first use
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `pthread_rwlock_t` can't be moved after first use
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
@@ -8,5 +8,5 @@ fn main() {
fn test_file_open_missing_needed_mode() {
let name = b"missing_arg.txt\0";
let name_ptr = name.as_ptr().cast::<libc::c_char>();
let _fd = unsafe { libc::open(name_ptr, libc::O_CREAT) }; //~ ERROR: Undefined Behavior: incorrect number of arguments for `open` with `O_CREAT`: got 2, expected at least 3
let _fd = unsafe { libc::open(name_ptr, libc::O_CREAT) }; //~ ERROR: Undefined Behavior: incorrect number of arguments for `open(pathname, O_CREAT, ...)`: got 2, expected at least 3
}
@@ -1,8 +1,8 @@
error: Undefined Behavior: incorrect number of arguments for `open` with `O_CREAT`: got 2, expected at least 3
error: Undefined Behavior: incorrect number of arguments for `open(pathname, O_CREAT, ...)`: got 2, expected at least 3
--> tests/fail-dep/libc/fs/unix_open_missing_required_mode.rs:LL:CC
|
LL | ...safe { libc::open(name_ptr, libc::O_CREAT) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ incorrect number of arguments for `open` with `O_CREAT`: got 2, expected at least 3
LL | ... { libc::open(name_ptr, libc::O_CREAT) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ incorrect number of arguments for `open(pathname, O_CREAT, ...)`: got 2, expected at least 3
|
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
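The reworded diagnostic above spells out that `open` with `O_CREAT` is expected to receive a third, mode argument. A hedged example of a call that satisfies the check (the permission value and the temporary path are illustrative only):

```rust
// `libc::open` is variadic; when O_CREAT is set, the mode for the new file
// must be supplied as the third argument.
fn create_file(path: &std::ffi::CStr) -> i32 {
    unsafe { libc::open(path.as_ptr(), libc::O_CREAT | libc::O_WRONLY, 0o644 as libc::c_uint) }
}

fn main() {
    let path = std::ffi::CString::new("missing_arg.txt").unwrap();
    let fd = create_file(&path);
    assert!(fd >= 0);
    unsafe { libc::close(fd) };
}
```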
97  src/tools/miri/tests/fail-dep/libc/libc-epoll-data-race.rs  Normal file
@@ -0,0 +1,97 @@
//! This ensures that when an epoll_wait wakes up and there are multiple events,
//! and we only read one of them, we do not synchronize with the other events
//! and therefore still report a data race for things that need to see the second event
//! to be considered synchronized.
//@only-target: linux
// ensure deterministic schedule
//@compile-flags: -Zmiri-preemption-rate=0

use std::convert::TryInto;
use std::thread;
use std::thread::spawn;

#[track_caller]
fn check_epoll_wait<const N: usize>(epfd: i32, expected_notifications: &[(u32, u64)]) {
let epoll_event = libc::epoll_event { events: 0, u64: 0 };
let mut array: [libc::epoll_event; N] = [epoll_event; N];
let maxsize = N;
let array_ptr = array.as_mut_ptr();
let res = unsafe { libc::epoll_wait(epfd, array_ptr, maxsize.try_into().unwrap(), 0) };
if res < 0 {
panic!("epoll_wait failed: {}", std::io::Error::last_os_error());
}
assert_eq!(
res,
expected_notifications.len().try_into().unwrap(),
"got wrong number of notifications"
);
let slice = unsafe { std::slice::from_raw_parts(array_ptr, res.try_into().unwrap()) };
for (return_event, expected_event) in slice.iter().zip(expected_notifications.iter()) {
let event = return_event.events;
let data = return_event.u64;
assert_eq!(event, expected_event.0, "got wrong events");
assert_eq!(data, expected_event.1, "got wrong data");
}
}

fn main() {
// Create an epoll instance.
let epfd = unsafe { libc::epoll_create1(0) };
assert_ne!(epfd, -1);

// Create two socketpair instances.
let mut fds_a = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds_a.as_mut_ptr()) };
assert_eq!(res, 0);

let mut fds_b = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds_b.as_mut_ptr()) };
assert_eq!(res, 0);

// Register both pipe read ends.
let mut ev = libc::epoll_event {
events: (libc::EPOLLIN | libc::EPOLLET) as _,
u64: u64::try_from(fds_a[1]).unwrap(),
};
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_ADD, fds_a[1], &mut ev) };
assert_eq!(res, 0);

let mut ev = libc::epoll_event {
events: (libc::EPOLLIN | libc::EPOLLET) as _,
u64: u64::try_from(fds_b[1]).unwrap(),
};
let res = unsafe { libc::epoll_ctl(epfd, libc::EPOLL_CTL_ADD, fds_b[1], &mut ev) };
assert_eq!(res, 0);

static mut VAL_ONE: u8 = 40; // This one will be read soundly.
static mut VAL_TWO: u8 = 50; // This one will be read unsoundly.
let thread1 = spawn(move || {
unsafe { VAL_ONE = 41 };

let data = "abcde".as_bytes().as_ptr();
let res = unsafe { libc::write(fds_a[0], data as *const libc::c_void, 5) };
assert_eq!(res, 5);

unsafe { VAL_TWO = 51 };

let res = unsafe { libc::write(fds_b[0], data as *const libc::c_void, 5) };
assert_eq!(res, 5);
});
thread::yield_now();

// With room for one event: check result from epoll_wait.
let expected_event = u32::try_from(libc::EPOLLIN).unwrap();
let expected_value = u64::try_from(fds_a[1]).unwrap();
check_epoll_wait::<1>(epfd, &[(expected_event, expected_value)]);

// Since we only received one event, we have synchronized with
// the write to VAL_ONE but not with the one to VAL_TWO.
unsafe {
assert_eq!({ VAL_ONE }, 41) // This one is not UB
};
unsafe {
assert_eq!({ VAL_TWO }, 51) //~ERROR: Data race detected
};

thread1.join().unwrap();
}
@@ -0,0 +1,20 @@
error: Undefined Behavior: Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
--> tests/fail-dep/libc/libc-epoll-data-race.rs:LL:CC
|
LL | assert_eq!({ VAL_TWO }, 51)
| ^^^^^^^ Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
|
help: and (1) occurred earlier here
--> tests/fail-dep/libc/libc-epoll-data-race.rs:LL:CC
|
LL | unsafe { VAL_TWO = 51 };
| ^^^^^^^^^^^^
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE (of the first span):
= note: inside `main` at tests/fail-dep/libc/libc-epoll-data-race.rs:LL:CC

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

error: aborting due to 1 previous error
31  src/tools/miri/tests/fail-dep/libc/socketpair-data-race.rs  Normal file
@@ -0,0 +1,31 @@
//! This is a regression test for <https://github.com/rust-lang/miri/issues/3947>: we had some
//! faulty logic around `release_clock` that led to this code not reporting a data race.
//@ignore-target: windows # no libc socketpair on Windows
//@compile-flags: -Zmiri-preemption-rate=0
use std::thread;

fn main() {
static mut VAL: u8 = 0;
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
let thread1 = thread::spawn(move || {
let data = "a".as_bytes().as_ptr();
let res = unsafe { libc::write(fds[0], data as *const libc::c_void, 1) };
assert_eq!(res, 1);
// The write to VAL is *after* the write to the socket, so there's no proper synchronization.
unsafe { VAL = 1 };
});
thread::yield_now();

let mut buf: [u8; 1] = [0; 1];
let res: i32 = unsafe {
libc::read(fds[1], buf.as_mut_ptr().cast(), buf.len() as libc::size_t).try_into().unwrap()
};
assert_eq!(res, 1);
assert_eq!(buf, "a".as_bytes());

unsafe { assert_eq!({ VAL }, 1) }; //~ERROR: Data race

thread1.join().unwrap();
}
@@ -0,0 +1,20 @@
error: Undefined Behavior: Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
--> tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
|
LL | unsafe { assert_eq!({ VAL }, 1) };
| ^^^ Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
|
help: and (1) occurred earlier here
--> tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
|
LL | unsafe { VAL = 1 };
| ^^^^^^^
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE (of the first span):
= note: inside `main` at tests/fail-dep/libc/socketpair-data-race.rs:LL:CC

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

error: aborting due to 1 previous error
@@ -1,8 +1,8 @@
error: unsupported operation: socketpair read: blocking isn't supported yet
error: unsupported operation: socketpair/pipe/pipe2 read: blocking isn't supported yet
--> tests/fail-dep/libc/socketpair_read_blocking.rs:LL:CC
|
LL | let _res = unsafe { libc::read(fds[1], buf.as_mut_ptr().cast(), buf.len() as libc::size_t) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ socketpair read: blocking isn't supported yet
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ socketpair/pipe/pipe2 read: blocking isn't supported yet
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that Miri does not support
= note: BACKTRACE:
@@ -1,8 +1,8 @@
error: unsupported operation: socketpair write: blocking isn't supported yet
error: unsupported operation: socketpair/pipe/pipe2 write: blocking isn't supported yet
--> tests/fail-dep/libc/socketpair_write_blocking.rs:LL:CC
|
LL | let _ = unsafe { libc::write(fds[0], data as *const libc::c_void, 3) };
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ socketpair write: blocking isn't supported yet
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ socketpair/pipe/pipe2 write: blocking isn't supported yet
|
= help: this is likely not a bug in the program; it indicates that the program performed an operation that Miri does not support
= note: BACKTRACE:
@@ -0,0 +1,16 @@
//@error-in-other-file: deadlock
//@normalize-stderr-test: "src/sys/.*\.rs" -> "$$FILE"
//@normalize-stderr-test: "LL \| .*" -> "LL | $$CODE"
//@normalize-stderr-test: "\| +\^+" -> "| ^"
//@normalize-stderr-test: "\n *= note:.*" -> ""
use std::mem;
use std::sync::Mutex;

fn main() {
let m = Mutex::new(0);
mem::forget(m.lock());
// Move the lock while it is "held" (really: leaked)
let m2 = m;
// Now try to acquire the lock again.
let _guard = m2.lock();
}
@@ -0,0 +1,16 @@
error: deadlock: the evaluated program deadlocked
--> RUSTLIB/std/$FILE:LL:CC
|
LL | $CODE
| ^ the evaluated program deadlocked
|
note: inside `main`
--> tests/fail/concurrency/mutex-leak-move-deadlock.rs:LL:CC
|
LL | $CODE
| ^

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

error: aborting due to 1 previous error
@@ -16,12 +16,15 @@ impl Foo {

pub fn main() {
let mut f = Foo(0);
let inner = &mut f.0 as *mut u64;
let _res = f.add(unsafe {
let n = f.0;
let alias = &mut f.0 as *mut u64;
let res = f.add(unsafe {
// This is the access at fault, but it's not immediately apparent because
// the reference that got invalidated is not under a Protector.
*inner = 42;
n
*alias = 42;
0
});
// `res` could be optimized to be `0`, since at the time the reference for the `self` argument
// is created, it has value `0`, and then later we add `0` to that. But turns out there is
// a sneaky alias that's used to change the value of `*self` before it is read...
assert_eq!(res, 42);
}
@@ -9,12 +9,12 @@ LL | fn add(&mut self, n: u64) -> u64 {
help: the accessed tag <TAG> was created here, in the initial state Reserved
--> tests/fail/tree_borrows/write-during-2phase.rs:LL:CC
|
LL | let _res = f.add(unsafe {
| ^
LL | let res = f.add(unsafe {
| ^
help: the accessed tag <TAG> later transitioned to Disabled due to a foreign write access at offsets [0x0..0x8]
--> tests/fail/tree_borrows/write-during-2phase.rs:LL:CC
|
LL | *inner = 42;
LL | *alias = 42;
| ^^^^^^^^^^^
= help: this transition corresponds to a loss of read and write permissions
= note: BACKTRACE (of the first span):
@@ -22,13 +22,12 @@ LL | *inner = 42;
note: inside `main`
--> tests/fail/tree_borrows/write-during-2phase.rs:LL:CC
|
LL | let _res = f.add(unsafe {
| ________________^
LL | | let n = f.0;
LL | let res = f.add(unsafe {
| _______________^
LL | | // This is the access at fault, but it's not immediately apparent because
LL | | // the reference that got invalidated is not under a Protector.
LL | | *inner = 42;
LL | | n
LL | | *alias = 42;
LL | | 0
LL | | });
| |______^

@@ -1,12 +0,0 @@
//@compile-flags: -Zmiri-panic-on-unsupported
//@normalize-stderr-test: "OS `.*`" -> "$$OS"

fn main() {
extern "Rust" {
fn foo();
}

unsafe {
foo();
}
}
@@ -1,4 +0,0 @@
thread 'main' panicked at tests/panic/unsupported_foreign_function.rs:LL:CC:
unsupported Miri functionality: can't call foreign function `foo` on $OS
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
note: in Miri, you may have to set `MIRIFLAGS=-Zmiri-env-forward=RUST_BACKTRACE` for the environment variable to have an effect
@@ -1,9 +0,0 @@
//@ignore-target: windows # no `syscall` on Windows
//@ignore-target: apple # `syscall` is not supported on macOS
//@compile-flags: -Zmiri-panic-on-unsupported

fn main() {
unsafe {
libc::syscall(0);
}
}
@@ -1,4 +0,0 @@
thread 'main' panicked at tests/panic/unsupported_syscall.rs:LL:CC:
unsupported Miri functionality: can't execute syscall with ID 0
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
note: in Miri, you may have to set `MIRIFLAGS=-Zmiri-env-forward=RUST_BACKTRACE` for the environment variable to have an effect
@@ -16,8 +16,8 @@ fn main() {

// `os_unfair_lock`s can be moved and leaked.
// In the real implementation, even moving it while locked is possible
// (and "forks" the lock, i.e. old and new location have independent wait queues);
// Miri behavior differs here and anyway none of this is documented.
// (and "forks" the lock, i.e. old and new location have independent wait queues).
// We only test the somewhat sane case of moving while unlocked that `std` plans to rely on.
let lock = lock;
let locked = unsafe { libc::os_unfair_lock_trylock(lock.get()) };
assert!(locked);

@@ -161,7 +161,7 @@ fn test_epoll_race() {
// Write to the eventfd instance.
let sized_8_data: [u8; 8] = 1_u64.to_ne_bytes();
let res = unsafe { libc::write(fd, sized_8_data.as_ptr() as *const libc::c_void, 8) };
// read returns number of bytes that have been read, which is always 8.
// write returns number of bytes written, which is always 8.
assert_eq!(res, 8);
});
thread::yield_now();

@@ -10,6 +10,7 @@ use std::thread;
fn main() {
test_read_write();
test_race();
test_syscall();
}

fn read_bytes<const N: usize>(fd: i32, buf: &mut [u8; N]) -> i32 {
@@ -109,3 +110,11 @@ fn test_race() {
thread::yield_now();
thread1.join().unwrap();
}

// This is a test for calling eventfd2 through a syscall.
fn test_syscall() {
let initval = 0 as libc::c_uint;
let flags = (libc::EFD_CLOEXEC | libc::EFD_NONBLOCK) as libc::c_int;
let fd = unsafe { libc::syscall(libc::SYS_eventfd2, initval, flags) };
assert_ne!(fd, -1);
}
@@ -7,6 +7,14 @@ fn main() {
test_pipe_threaded();
test_race();
test_pipe_array();
#[cfg(any(
target_os = "linux",
target_os = "illumos",
target_os = "freebsd",
target_os = "solaris"
))]
// `pipe2` only exists in some specific os.
test_pipe2();
}

fn test_pipe() {
@@ -110,3 +118,16 @@ fn test_pipe_array() {
let mut fds: [i32; 2] = [0; 2];
assert_eq!(unsafe { pipe(&mut fds) }, 0);
}

/// Test if pipe2 (including the O_NONBLOCK flag) is supported.
#[cfg(any(
target_os = "linux",
target_os = "illumos",
target_os = "freebsd",
target_os = "solaris"
))]
fn test_pipe2() {
let mut fds = [-1, -1];
let res = unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_NONBLOCK) };
assert_eq!(res, 0);
}
@@ -1,10 +1,27 @@
//@ignore-target: windows # No pthreads on Windows
use std::ffi::CStr;
#[cfg(not(target_os = "freebsd"))]
use std::ffi::CString;
use std::ffi::{CStr, CString};
use std::thread;

const MAX_THREAD_NAME_LEN: usize = {
cfg_if::cfg_if! {
if #[cfg(any(target_os = "linux"))] {
16
} else if #[cfg(any(target_os = "illumos", target_os = "solaris"))] {
32
} else if #[cfg(target_os = "macos")] {
libc::MAXTHREADNAMESIZE // 64, at the time of writing
} else if #[cfg(target_os = "freebsd")] {
usize::MAX // as far as I can tell
} else {
panic!()
}
}
};

fn main() {
// The short name should be shorter than 16 bytes which POSIX promises
// for thread names. The length includes a null terminator.
let short_name = "test_named".to_owned();
let long_name = std::iter::once("test_named_thread_truncation")
.chain(std::iter::repeat(" yada").take(100))
.collect::<String>();
@@ -48,23 +65,120 @@ fn main() {
}
}

let result = thread::Builder::new().name(long_name.clone()).spawn(move || {
// Rust remembers the full thread name itself.
assert_eq!(thread::current().name(), Some(long_name.as_str()));
thread::Builder::new()
.spawn(move || {
// Set short thread name.
let cstr = CString::new(short_name.clone()).unwrap();
assert!(cstr.to_bytes_with_nul().len() <= MAX_THREAD_NAME_LEN); // this should fit
assert_eq!(set_thread_name(&cstr), 0);

// But the system is limited -- make sure we successfully set a truncation.
let mut buf = vec![0u8; long_name.len() + 1];
assert_eq!(get_thread_name(&mut buf), 0);
let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
assert!(cstr.to_bytes().len() >= 15, "name is too short: len={}", cstr.to_bytes().len()); // POSIX seems to promise at least 15 chars
assert!(long_name.as_bytes().starts_with(cstr.to_bytes()));
// Now get it again, in various ways.

// Also test directly calling pthread_setname to check its return value.
assert_eq!(set_thread_name(&cstr), 0);
// But with a too long name it should fail (except on FreeBSD where the
// function has no return, hence cannot indicate failure).
#[cfg(not(target_os = "freebsd"))]
assert_ne!(set_thread_name(&CString::new(long_name).unwrap()), 0);
});
result.unwrap().join().unwrap();
// POSIX seems to promise at least 15 chars excluding a null terminator.
let mut buf = vec![0u8; short_name.len().max(15) + 1];
assert_eq!(get_thread_name(&mut buf), 0);
let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
assert_eq!(cstr.to_bytes(), short_name.as_bytes());

// Test what happens when the buffer is shorter than 16, but still long enough.
let res = get_thread_name(&mut buf[..15]);
cfg_if::cfg_if! {
if #[cfg(target_os = "linux")] {
// For glibc used by linux-gnu there should be a failue,
// if a shorter than 16 bytes buffer is provided, even if that would be
// large enough for the thread name.
assert_eq!(res, libc::ERANGE);
} else {
// Everywhere else, this should work.
assert_eq!(res, 0);
// POSIX seems to promise at least 15 chars excluding a null terminator.
let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
assert_eq!(short_name.as_bytes(), cstr.to_bytes());
}
}

// Test what happens when the buffer is too short even for the short name.
let res = get_thread_name(&mut buf[..4]);
cfg_if::cfg_if! {
if #[cfg(any(target_os = "freebsd", target_os = "macos"))] {
// On macOS and FreeBSD it's not an error for the buffer to be
// too short for the thread name -- they truncate instead.
assert_eq!(res, 0);
let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
assert_eq!(cstr.to_bytes_with_nul().len(), 4);
assert!(short_name.as_bytes().starts_with(cstr.to_bytes()));
} else {
// The rest should give an error.
assert_eq!(res, libc::ERANGE);
}
}

// Test zero-sized buffer.
let res = get_thread_name(&mut []);
cfg_if::cfg_if! {
if #[cfg(any(target_os = "freebsd", target_os = "macos"))] {
// On macOS and FreeBSD it's not an error for the buffer to be
// too short for the thread name -- even with size 0.
assert_eq!(res, 0);
} else {
// The rest should give an error.
assert_eq!(res, libc::ERANGE);
}
}
})
.unwrap()
.join()
.unwrap();

thread::Builder::new()
.spawn(move || {
// Set full thread name.
let cstr = CString::new(long_name.clone()).unwrap();
let res = set_thread_name(&cstr);
cfg_if::cfg_if! {
if #[cfg(target_os = "freebsd")] {
// Names of all size are supported.
assert!(cstr.to_bytes_with_nul().len() <= MAX_THREAD_NAME_LEN);
assert_eq!(res, 0);
} else if #[cfg(target_os = "macos")] {
// Name is too long.
assert!(cstr.to_bytes_with_nul().len() > MAX_THREAD_NAME_LEN);
assert_eq!(res, libc::ENAMETOOLONG);
} else {
// Name is too long.
assert!(cstr.to_bytes_with_nul().len() > MAX_THREAD_NAME_LEN);
assert_eq!(res, libc::ERANGE);
}
}
// Set the longest name we can.
let truncated_name = &long_name[..long_name.len().min(MAX_THREAD_NAME_LEN - 1)];
let cstr = CString::new(truncated_name).unwrap();
assert_eq!(set_thread_name(&cstr), 0);

// Now get it again, in various ways.

// This name should round-trip properly.
let mut buf = vec![0u8; long_name.len() + 1];
assert_eq!(get_thread_name(&mut buf), 0);
let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
assert_eq!(cstr.to_bytes(), truncated_name.as_bytes());

// Test what happens when our buffer is just one byte too small.
let res = get_thread_name(&mut buf[..truncated_name.len()]);
cfg_if::cfg_if! {
if #[cfg(any(target_os = "freebsd", target_os = "macos"))] {
// On macOS and FreeBSD it's not an error for the buffer to be
// too short for the thread name -- they truncate instead.
assert_eq!(res, 0);
let cstr = CStr::from_bytes_until_nul(&buf).unwrap();
assert_eq!(cstr.to_bytes(), &truncated_name.as_bytes()[..(truncated_name.len() - 1)]);
} else {
// The rest should give an error.
assert_eq!(res, libc::ERANGE);
}
}
})
.unwrap()
.join()
.unwrap();
}

@@ -16,6 +16,19 @@ fn main() {
.join()
.unwrap();

// Long thread name.
let long_name = std::iter::once("test_named_thread_truncation")
.chain(std::iter::repeat(" long").take(100))
.collect::<String>();
thread::Builder::new()
.name(long_name.clone())
.spawn(move || {
assert_eq!(thread::current().name().unwrap(), long_name);
})
.unwrap()
.join()
.unwrap();

// Also check main thread name.
assert_eq!(thread::current().name().unwrap(), "main");
}