Mirror of https://github.com/rust-lang/rust.git, synced 2024-11-27 01:04:03 +00:00

Auto merge of #122538 - RalfJung:miri, r=RalfJung

Miri subtree update

r? `@ghost`

Commit accc516128
src/tools/miri/.github/workflows/ci.yml (2 changes, vendored)
@@ -165,7 +165,7 @@ jobs:
ZULIP_API_TOKEN: ${{ secrets.ZULIP_API_TOKEN }}
run: |
~/.local/bin/zulip-send --user $ZULIP_BOT_EMAIL --api-key $ZULIP_API_TOKEN --site https://rust-lang.zulipchat.com \
--stream miri --subject "Cron Job Failure (miri, $(date -u +%Y-%m))" \
--stream miri --subject "Miri Build Failure ($(date -u +%Y-%m))" \
--message 'Dear @*T-miri*,

It would appear that the [Miri cron job build]('"https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID"') failed.
src/tools/miri/.github/workflows/sysroots.yml (21 changes, vendored)
@@ -21,6 +21,13 @@ jobs:
./miri install
python3 -m pip install beautifulsoup4
./ci/build-all-targets.sh
- name: Upload build errors
# We don't want to skip this step on failure
if: always()
uses: actions/upload-artifact@v4
with:
name: failures
path: failures.tar.gz

sysroots-cron-fail-notify:
name: sysroots cronjob failure notification
@@ -28,6 +35,11 @@ jobs:
needs: [sysroots]
if: failure() || cancelled()
steps:
# Download our build error logs
- name: Download build errors
uses: actions/download-artifact@v4
with:
name: failures
# Send a Zulip notification
- name: Install zulip-send
run: pip3 install zulip
@@ -36,11 +48,12 @@ jobs:
ZULIP_BOT_EMAIL: ${{ secrets.ZULIP_BOT_EMAIL }}
ZULIP_API_TOKEN: ${{ secrets.ZULIP_API_TOKEN }}
run: |
tar xf failures.tar.gz
ls failures
~/.local/bin/zulip-send --user $ZULIP_BOT_EMAIL --api-key $ZULIP_API_TOKEN --site https://rust-lang.zulipchat.com \
--stream miri --subject "Cron Job Failure (miri, $(date -u +%Y-%m))" \
--message 'Dear @*T-miri*,

It would appear that the [Miri sysroots cron job build]('"https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID"') failed.
--stream miri --subject "Sysroot Build Errors ($(date -u +%Y-%m))" \
--message 'It would appear that the [Miri sysroots cron job build]('"https://github.com/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID"') failed to build these targets:
'"$(ls failures)"'

Would you mind investigating this issue?
@@ -95,7 +95,7 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) {
let target = get_arg_flag_value("--target");
let target = target.as_ref().unwrap_or(host);

// If cleaning the the target directory & sysroot cache,
// If cleaning the target directory & sysroot cache,
// delete them then exit. There is no reason to setup a new
// sysroot in this execution.
if let MiriCommand::Clean = subcommand {
@@ -3,6 +3,7 @@
set -eu
set -o pipefail

# .github/workflows/sysroots.yml relies on this name this to report which sysroots didn't build
FAILS_DIR=failures

rm -rf $FAILS_DIR
@@ -13,14 +14,16 @@ PLATFORM_SUPPORT_FILE=$(rustc +miri --print sysroot)/share/doc/rust/html/rustc/p
for target in $(python3 ci/scrape-targets.py $PLATFORM_SUPPORT_FILE); do
# Wipe the cache before every build to minimize disk usage
cargo +miri miri clean
if cargo +miri miri setup --target $target 2>&1 | tee failures/$target; then
if cargo +miri miri setup --target $target 2>&1 | tee $FAILS_DIR/$target; then
# If the build succeeds, delete its output. If we have output, a build failed.
rm $FAILS_DIR/$target
fi
done

tar czf $FAILS_DIR.tar.gz $FAILS_DIR

# If the sysroot for any target fails to build, we will have a file in FAILS_DIR.
if [[ $(ls failures | wc -l) -ne 0 ]]; then
if [[ $(ls $FAILS_DIR | wc -l) -ne 0 ]]; then
echo "Sysroots for the following targets failed to build:"
ls $FAILS_DIR
exit 1
@@ -1 +1 @@
4d4bb491b65c300835442f6cb4f34fc9a5685c26
ee03c286cfdca26fa5b2a4ee40957625d2c826ff
@@ -1074,7 +1074,12 @@ impl VClockAlloc {
size: Size,
machine: &mut MiriMachine<'_, '_>,
) -> InterpResult<'tcx> {
self.unique_access(alloc_id, alloc_range(Size::ZERO, size), NaWriteType::Deallocate, machine)
self.unique_access(
alloc_id,
alloc_range(Size::ZERO, size),
NaWriteType::Deallocate,
machine,
)
}
}
@@ -527,8 +527,7 @@ impl<'mir, 'tcx> MiriMachine<'mir, 'tcx> {
pub fn emit_diagnostic(&self, e: NonHaltingDiagnostic) {
use NonHaltingDiagnostic::*;

let stacktrace =
Frame::generate_stacktrace_from_stack(self.threads.active_thread_stack());
let stacktrace = Frame::generate_stacktrace_from_stack(self.threads.active_thread_stack());
let (stacktrace, _was_pruned) = prune_stacktrace(stacktrace, self);

let (title, diag_level) = match &e {
@@ -297,32 +297,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
}

// Synchronization primitives
"AcquireSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.AcquireSRWLockExclusive(ptr)?;
}
"ReleaseSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.ReleaseSRWLockExclusive(ptr)?;
}
"TryAcquireSRWLockExclusive" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let ret = this.TryAcquireSRWLockExclusive(ptr)?;
this.write_scalar(ret, dest)?;
}
"AcquireSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.AcquireSRWLockShared(ptr)?;
}
"ReleaseSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
this.ReleaseSRWLockShared(ptr)?;
}
"TryAcquireSRWLockShared" => {
let [ptr] = this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
let ret = this.TryAcquireSRWLockShared(ptr)?;
this.write_scalar(ret, dest)?;
}
"InitOnceBeginInitialize" => {
let [ptr, flags, pending, context] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
@@ -335,25 +309,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let result = this.InitOnceComplete(ptr, flags, context)?;
this.write_scalar(result, dest)?;
}
"SleepConditionVariableSRW" => {
let [condvar, lock, timeout, flags] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;

let result = this.SleepConditionVariableSRW(condvar, lock, timeout, flags, dest)?;
this.write_scalar(result, dest)?;
}
"WakeConditionVariable" => {
let [condvar] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;

this.WakeConditionVariable(condvar)?;
}
"WakeAllConditionVariable" => {
let [condvar] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;

this.WakeAllConditionVariable(condvar)?;
}
"WaitOnAddress" => {
let [ptr_op, compare_op, size_op, timeout_op] =
this.check_shim(abi, Abi::System { unwind: false }, link_name, args)?;
@@ -3,52 +3,14 @@ use std::time::Duration;
use rustc_target::abi::Size;

use crate::concurrency::init_once::InitOnceStatus;
use crate::concurrency::sync::{CondvarLock, RwLockMode};
use crate::concurrency::thread::MachineCallback;
use crate::*;

impl<'mir, 'tcx> EvalContextExtPriv<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
/// Try to reacquire the lock associated with the condition variable after we
/// were signaled.
fn reacquire_cond_lock(
&mut self,
thread: ThreadId,
lock: RwLockId,
mode: RwLockMode,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.unblock_thread(thread);

match mode {
RwLockMode::Read =>
if this.rwlock_is_write_locked(lock) {
this.rwlock_enqueue_and_block_reader(lock, thread);
} else {
this.rwlock_reader_lock(lock, thread);
},
RwLockMode::Write =>
if this.rwlock_is_locked(lock) {
this.rwlock_enqueue_and_block_writer(lock, thread);
} else {
this.rwlock_writer_lock(lock, thread);
},
}

Ok(())
}

// Windows sync primitives are pointer sized.
// We only use the first 4 bytes for the id.

fn srwlock_get_id(
&mut self,
rwlock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, RwLockId> {
let this = self.eval_context_mut();
this.rwlock_get_or_create_id(rwlock_op, this.windows_ty_layout("SRWLOCK"), 0)
}

fn init_once_get_id(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
@@ -56,117 +18,11 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
let this = self.eval_context_mut();
this.init_once_get_or_create_id(init_once_op, this.windows_ty_layout("INIT_ONCE"), 0)
}

fn condvar_get_id(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, CondvarId> {
let this = self.eval_context_mut();
this.condvar_get_or_create_id(condvar_op, this.windows_ty_layout("CONDITION_VARIABLE"), 0)
}
}

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriInterpCx<'mir, 'tcx> {}
#[allow(non_snake_case)]
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
fn AcquireSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();

if this.rwlock_is_locked(id) {
// Note: this will deadlock if the lock is already locked by this
// thread in any way.
//
// FIXME: Detect and report the deadlock proactively. (We currently
// report the deadlock only when no thread can continue execution,
// but we could detect that this lock is already locked and report
// an error.)
this.rwlock_enqueue_and_block_writer(id, active_thread);
} else {
this.rwlock_writer_lock(id, active_thread);
}

Ok(())
}

fn TryAcquireSRWLockExclusive(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();

if this.rwlock_is_locked(id) {
// Lock is already held.
Ok(Scalar::from_u8(0))
} else {
this.rwlock_writer_lock(id, active_thread);
Ok(Scalar::from_u8(1))
}
}

fn ReleaseSRWLockExclusive(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();

if !this.rwlock_writer_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!(
"calling ReleaseSRWLockExclusive on an SRWLock that is not exclusively locked by the current thread"
);
}

Ok(())
}

fn AcquireSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();

if this.rwlock_is_write_locked(id) {
this.rwlock_enqueue_and_block_reader(id, active_thread);
} else {
this.rwlock_reader_lock(id, active_thread);
}

Ok(())
}

fn TryAcquireSRWLockShared(
&mut self,
lock_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();

if this.rwlock_is_write_locked(id) {
Ok(Scalar::from_u8(0))
} else {
this.rwlock_reader_lock(id, active_thread);
Ok(Scalar::from_u8(1))
}
}

fn ReleaseSRWLockShared(&mut self, lock_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let id = this.srwlock_get_id(lock_op)?;
let active_thread = this.get_active_thread();

if !this.rwlock_reader_unlock(id, active_thread) {
// The docs do not say anything about this case, but it seems better to not allow it.
throw_ub_format!(
"calling ReleaseSRWLockShared on an SRWLock that is not locked by the current thread"
);
}

Ok(())
}

fn InitOnceBeginInitialize(
&mut self,
init_once_op: &OpTy<'tcx, Provenance>,
@@ -399,131 +255,4 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {

Ok(())
}

fn SleepConditionVariableSRW(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
lock_op: &OpTy<'tcx, Provenance>,
timeout_op: &OpTy<'tcx, Provenance>,
flags_op: &OpTy<'tcx, Provenance>,
dest: &MPlaceTy<'tcx, Provenance>,
) -> InterpResult<'tcx, Scalar<Provenance>> {
let this = self.eval_context_mut();

let condvar_id = this.condvar_get_id(condvar_op)?;
let lock_id = this.srwlock_get_id(lock_op)?;
let timeout_ms = this.read_scalar(timeout_op)?.to_u32()?;
let flags = this.read_scalar(flags_op)?.to_u32()?;

let timeout_time = if timeout_ms == this.eval_windows_u32("c", "INFINITE") {
None
} else {
let duration = Duration::from_millis(timeout_ms.into());
Some(this.machine.clock.now().checked_add(duration).unwrap())
};

let shared_mode = 0x1; // CONDITION_VARIABLE_LOCKMODE_SHARED is not in std
let mode = if flags == 0 {
RwLockMode::Write
} else if flags == shared_mode {
RwLockMode::Read
} else {
throw_unsup_format!("unsupported `Flags` {flags} in `SleepConditionVariableSRW`");
};

let active_thread = this.get_active_thread();

let was_locked = match mode {
RwLockMode::Read => this.rwlock_reader_unlock(lock_id, active_thread),
RwLockMode::Write => this.rwlock_writer_unlock(lock_id, active_thread),
};

if !was_locked {
throw_ub_format!(
"calling SleepConditionVariableSRW with an SRWLock that is not locked by the current thread"
);
}

this.block_thread(active_thread);
this.condvar_wait(condvar_id, active_thread, CondvarLock::RwLock { id: lock_id, mode });

if let Some(timeout_time) = timeout_time {
struct Callback<'tcx> {
thread: ThreadId,
condvar_id: CondvarId,
lock_id: RwLockId,
mode: RwLockMode,
dest: MPlaceTy<'tcx, Provenance>,
}

impl<'tcx> VisitProvenance for Callback<'tcx> {
fn visit_provenance(&self, visit: &mut VisitWith<'_>) {
let Callback { thread: _, condvar_id: _, lock_id: _, mode: _, dest } = self;
dest.visit_provenance(visit);
}
}

impl<'mir, 'tcx: 'mir> MachineCallback<'mir, 'tcx> for Callback<'tcx> {
fn call(&self, this: &mut MiriInterpCx<'mir, 'tcx>) -> InterpResult<'tcx> {
this.reacquire_cond_lock(self.thread, self.lock_id, self.mode)?;

this.condvar_remove_waiter(self.condvar_id, self.thread);

let error_timeout = this.eval_windows("c", "ERROR_TIMEOUT");
this.set_last_error(error_timeout)?;
this.write_scalar(this.eval_windows("c", "FALSE"), &self.dest)?;
Ok(())
}
}

this.register_timeout_callback(
active_thread,
Time::Monotonic(timeout_time),
Box::new(Callback {
thread: active_thread,
condvar_id,
lock_id,
mode,
dest: dest.clone(),
}),
);
}

Ok(this.eval_windows("c", "TRUE"))
}

fn WakeConditionVariable(&mut self, condvar_op: &OpTy<'tcx, Provenance>) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let condvar_id = this.condvar_get_id(condvar_op)?;

if let Some((thread, lock)) = this.condvar_signal(condvar_id) {
if let CondvarLock::RwLock { id, mode } = lock {
this.reacquire_cond_lock(thread, id, mode)?;
this.unregister_timeout_callback_if_exists(thread);
} else {
panic!("mutexes should not exist on windows");
}
}

Ok(())
}

fn WakeAllConditionVariable(
&mut self,
condvar_op: &OpTy<'tcx, Provenance>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let condvar_id = this.condvar_get_id(condvar_op)?;

while let Some((thread, lock)) = this.condvar_signal(condvar_id) {
if let CondvarLock::RwLock { id, mode } = lock {
this.reacquire_cond_lock(thread, id, mode)?;
this.unregister_timeout_callback_if_exists(thread);
} else {
panic!("mutexes should not exist on windows");
}
}

Ok(())
}
}
@@ -1,7 +1,8 @@
//@compile-flags: -Cpanic=abort
//@normalize-stderr-test: "OS `.*`" -> "$$OS"
// Make sure we pretend the allocation symbols don't exist when there is no allocator

#![feature(lang_items, start)]
#![feature(start)]
#![no_std]

extern "Rust" {
@@ -21,6 +22,3 @@ fn start(_: isize, _: *const *const u8) -> isize {
fn panic_handler(_: &core::panic::PanicInfo) -> ! {
loop {}
}

#[lang = "eh_personality"]
fn eh_personality() {}
@@ -1,5 +1,6 @@
#![feature(lang_items, start, core_intrinsics)]
#![feature(start, core_intrinsics)]
#![no_std]
//@compile-flags: -Cpanic=abort
// windows tls dtors go through libstd right now, thus this test
// cannot pass. When windows tls dtors go through the special magic
// windows linker section, we can run this test on windows again.
@@ -36,6 +37,3 @@ fn panic_handler(panic_info: &core::panic::PanicInfo) -> ! {
writeln!(HostErr, "{panic_info}").ok();
core::intrinsics::abort(); //~ ERROR: the program aborted execution
}

#[lang = "eh_personality"]
fn eh_personality() {}
@@ -1,226 +0,0 @@
//@only-target-windows: Uses win32 api functions
// We are making scheduler assumptions here.
//@compile-flags: -Zmiri-preemption-rate=0

use std::ptr::null_mut;
use std::thread;

use windows_sys::Win32::System::Threading::{
AcquireSRWLockExclusive, AcquireSRWLockShared, ReleaseSRWLockExclusive, ReleaseSRWLockShared,
SleepConditionVariableSRW, WakeAllConditionVariable, CONDITION_VARIABLE,
CONDITION_VARIABLE_LOCKMODE_SHARED, INFINITE, SRWLOCK,
};

// not in windows-sys
const SRWLOCK_INIT: SRWLOCK = SRWLOCK { Ptr: null_mut() };
const CONDITION_VARIABLE_INIT: CONDITION_VARIABLE = CONDITION_VARIABLE { Ptr: null_mut() };

#[derive(Copy, Clone)]
struct SendPtr<T>(*mut T);

unsafe impl<T> Send for SendPtr<T> {}

/// threads should be able to reacquire the lock while it is locked by multiple other threads in shared mode
fn all_shared() {
println!("all_shared");

let mut lock = SRWLOCK_INIT;
let mut condvar = CONDITION_VARIABLE_INIT;

let lock_ptr = SendPtr(&mut lock);
let condvar_ptr = SendPtr(&mut condvar);

let mut handles = Vec::with_capacity(10);

// waiters
for i in 0..5 {
handles.push(thread::spawn(move || {
let condvar_ptr = condvar_ptr; // avoid field capture
let lock_ptr = lock_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("exclusive waiter {i} locked");

let r = unsafe {
SleepConditionVariableSRW(
condvar_ptr.0,
lock_ptr.0,
INFINITE,
CONDITION_VARIABLE_LOCKMODE_SHARED,
)
};
assert_ne!(r, 0);

println!("exclusive waiter {i} reacquired lock");

// unlocking is unnecessary because the lock is never used again
}));
}

// ensures each waiter is waiting by this point
thread::yield_now();

// readers
for i in 0..5 {
handles.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("reader {i} locked");

// switch to next reader or main thread
thread::yield_now();

unsafe {
ReleaseSRWLockShared(lock_ptr.0);
}
println!("reader {i} unlocked");
}));
}

// ensures each reader has acquired the lock
thread::yield_now();

unsafe {
WakeAllConditionVariable(condvar_ptr.0);
}

for handle in handles {
handle.join().unwrap();
}
}

// reacquiring a lock should wait until the lock is not exclusively locked
fn shared_sleep_and_exclusive_lock() {
println!("shared_sleep_and_exclusive_lock");

let mut lock = SRWLOCK_INIT;
let mut condvar = CONDITION_VARIABLE_INIT;

let lock_ptr = SendPtr(&mut lock);
let condvar_ptr = SendPtr(&mut condvar);

let mut waiters = Vec::with_capacity(5);
for i in 0..5 {
waiters.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
let condvar_ptr = condvar_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("shared waiter {i} locked");

let r = unsafe {
SleepConditionVariableSRW(
condvar_ptr.0,
lock_ptr.0,
INFINITE,
CONDITION_VARIABLE_LOCKMODE_SHARED,
)
};
assert_ne!(r, 0);

println!("shared waiter {i} reacquired lock");

// unlocking is unnecessary because the lock is never used again
}));
}

// ensures each waiter is waiting by this point
thread::yield_now();

unsafe {
AcquireSRWLockExclusive(lock_ptr.0);
}
println!("main locked");

unsafe {
WakeAllConditionVariable(condvar_ptr.0);
}

// waiters are now waiting for the lock to be unlocked
thread::yield_now();

unsafe {
ReleaseSRWLockExclusive(lock_ptr.0);
}
println!("main unlocked");

for handle in waiters {
handle.join().unwrap();
}
}

// threads reacquiring locks should wait for all locks to be released first
fn exclusive_sleep_and_shared_lock() {
println!("exclusive_sleep_and_shared_lock");

let mut lock = SRWLOCK_INIT;
let mut condvar = CONDITION_VARIABLE_INIT;

let lock_ptr = SendPtr(&mut lock);
let condvar_ptr = SendPtr(&mut condvar);

let mut handles = Vec::with_capacity(10);
for i in 0..5 {
handles.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
let condvar_ptr = condvar_ptr; // avoid field capture
unsafe {
AcquireSRWLockExclusive(lock_ptr.0);
}

println!("exclusive waiter {i} locked");

let r = unsafe { SleepConditionVariableSRW(condvar_ptr.0, lock_ptr.0, INFINITE, 0) };
assert_ne!(r, 0);

println!("exclusive waiter {i} reacquired lock");

// switch to next waiter or main thread
thread::yield_now();

unsafe {
ReleaseSRWLockExclusive(lock_ptr.0);
}
println!("exclusive waiter {i} unlocked");
}));
}

for i in 0..5 {
handles.push(thread::spawn(move || {
let lock_ptr = lock_ptr; // avoid field capture
unsafe {
AcquireSRWLockShared(lock_ptr.0);
}
println!("reader {i} locked");

// switch to next reader or main thread
thread::yield_now();

unsafe {
ReleaseSRWLockShared(lock_ptr.0);
}
println!("reader {i} unlocked");
}));
}

// ensures each reader has acquired the lock
thread::yield_now();

unsafe {
WakeAllConditionVariable(condvar_ptr.0);
}

for handle in handles {
handle.join().unwrap();
}
}

fn main() {
all_shared();
shared_sleep_and_exclusive_lock();
exclusive_sleep_and_shared_lock();
}
@@ -1,60 +0,0 @@
all_shared
exclusive waiter 0 locked
exclusive waiter 1 locked
exclusive waiter 2 locked
exclusive waiter 3 locked
exclusive waiter 4 locked
reader 0 locked
reader 1 locked
reader 2 locked
reader 3 locked
reader 4 locked
exclusive waiter 0 reacquired lock
exclusive waiter 1 reacquired lock
exclusive waiter 2 reacquired lock
exclusive waiter 3 reacquired lock
exclusive waiter 4 reacquired lock
reader 0 unlocked
reader 1 unlocked
reader 2 unlocked
reader 3 unlocked
reader 4 unlocked
shared_sleep_and_exclusive_lock
shared waiter 0 locked
shared waiter 1 locked
shared waiter 2 locked
shared waiter 3 locked
shared waiter 4 locked
main locked
main unlocked
shared waiter 0 reacquired lock
shared waiter 1 reacquired lock
shared waiter 2 reacquired lock
shared waiter 3 reacquired lock
shared waiter 4 reacquired lock
exclusive_sleep_and_shared_lock
exclusive waiter 0 locked
exclusive waiter 1 locked
exclusive waiter 2 locked
exclusive waiter 3 locked
exclusive waiter 4 locked
reader 0 locked
reader 1 locked
reader 2 locked
reader 3 locked
reader 4 locked
reader 0 unlocked
reader 1 unlocked
reader 2 unlocked
reader 3 unlocked
reader 4 unlocked
exclusive waiter 0 reacquired lock
exclusive waiter 0 unlocked
exclusive waiter 1 reacquired lock
exclusive waiter 1 unlocked
exclusive waiter 2 reacquired lock
exclusive waiter 2 unlocked
exclusive waiter 3 reacquired lock
exclusive waiter 3 unlocked
exclusive waiter 4 reacquired lock
exclusive waiter 4 unlocked
@@ -10,9 +10,9 @@
use std::{
alloc::{AllocError, Allocator, Layout},
cell::{Cell, UnsafeCell},
mem,
ptr::{self, addr_of, NonNull},
thread::{self, ThreadId},
mem,
};

const BIN_SIZE: usize = 8;
@@ -33,7 +33,7 @@ impl MyBin {
}
// Cast the *entire* thing to a raw pointer to not restrict its provenance.
let bin = self as *const MyBin;
let base_ptr = UnsafeCell::raw_get(unsafe{ addr_of!((*bin).memory )}).cast::<usize>();
let base_ptr = UnsafeCell::raw_get(unsafe { addr_of!((*bin).memory) }).cast::<usize>();
let ptr = unsafe { NonNull::new_unchecked(base_ptr.add(top)) };
self.top.set(top + 1);
Some(ptr.cast())
@@ -64,22 +64,14 @@ impl MyAllocator {
MyAllocator {
thread_id,
bins: Box::new(
[MyBin {
top: Cell::new(0),
thread_id,
memory: UnsafeCell::default(),
}; 1],
[MyBin { top: Cell::new(0), thread_id, memory: UnsafeCell::default() }; 1],
),
}
}

// Pretends to be expensive finding a suitable bin for the layout.
fn find_bin(&self, layout: Layout) -> Option<&MyBin> {
if layout == Layout::new::<usize>() {
Some(&self.bins[0])
} else {
None
}
if layout == Layout::new::<usize>() { Some(&self.bins[0]) } else { None }
}
}
@@ -1,5 +1,6 @@
#![feature(lang_items, start)]
#![feature(start)]
#![no_std]
//@compile-flags: -Cpanic=abort
// windows tls dtors go through libstd right now, thus this test
// cannot pass. When windows tls dtors go through the special magic
// windows linker section, we can run this test on windows again.
@@ -24,6 +25,3 @@ fn start(_: isize, _: *const *const u8) -> isize {
fn panic_handler(_: &core::panic::PanicInfo) -> ! {
loop {}
}

#[lang = "eh_personality"]
fn eh_personality() {}
@@ -1,4 +1,5 @@
#![feature(lang_items, start)]
//@compile-flags: -Cpanic=abort
#![feature(start)]
#![no_std]

// Plumbing to let us use `writeln!` to host stdout:
@@ -32,6 +33,3 @@ fn start(_: isize, _: *const *const u8) -> isize {
fn panic_handler(_: &core::panic::PanicInfo) -> ! {
loop {}
}

#[lang = "eh_personality"]
fn eh_personality() {}
@@ -1,4 +1,4 @@
#![feature(lang_items, unboxed_closures, fn_traits)]
#![feature(unboxed_closures, fn_traits)]

struct S3 {
x: i32,
@@ -24,7 +24,8 @@ fn test_time_passes() {
assert_eq!(now2 - diff, now1);
// The virtual clock is deterministic and I got 15ms on a 64-bit Linux machine. However, this
// changes according to the platform so we use an interval to be safe. This should be updated
// if `NANOSECONDS_PER_BASIC_BLOCK` changes.
// if `NANOSECONDS_PER_BASIC_BLOCK` changes. It may also need updating if the standard library
// code that runs in the loop above changes.
assert!(diff.as_millis() > 5);
assert!(diff.as_millis() < 20);
}
@@ -37,8 +38,18 @@ fn test_block_for_one_second() {
while Instant::now() < end {}
}

/// Ensures that we get the same behavior across all targets.
fn test_deterministic() {
let begin = Instant::now();
for _ in 0..100_000 {}
let time = begin.elapsed();
println!("The loop took around {}s", time.as_secs());
println!("(It's fine for this number to change when you `--bless` this test.)")
}

fn main() {
test_time_passes();
test_block_for_one_second();
test_sleep();
test_deterministic();
}
@@ -0,0 +1,2 @@
The loop took around 12s
(It's fine for this number to change when you `--bless` this test.)
@@ -1,8 +0,0 @@
use std::time::Instant;

fn main() {
let begin = Instant::now();
for _ in 0..100_000 {}
let time = begin.elapsed();
println!("The loop took around {}s", time.as_secs());
}
@@ -1 +0,0 @@
The loop took around 12s