Split up RawMutex trait a bit

James Munns 2024-07-21 15:37:34 +02:00
parent d4f906dbc7
commit 1015d32105
6 changed files with 371 additions and 33 deletions


@ -42,8 +42,6 @@ pub struct NoopRawMutex {
_phantom: PhantomData<*mut ()>,
}
unsafe impl Send for NoopRawMutex {}
impl NoopRawMutex {
/// Create a new `NoopRawMutex`.
pub const fn new() -> Self {


@ -4,3 +4,10 @@ version = "0.1.0"
edition = "2021"
[dependencies]
critical-section = { version = "1.1", optional = true }
[features]
default = [
"impl-critical-section",
]
impl-critical-section = ["dep:critical-section"]

scoped-mutex/build.rs (new file, 7 lines)

@ -0,0 +1,7 @@
#[path = "./build_common.rs"]
mod common;
fn main() {
let mut cfgs = common::CfgSet::new();
common::set_target_cfgs(&mut cfgs);
}

scoped-mutex/build_common.rs (new file, 113 lines)

@ -0,0 +1,113 @@
// NOTE: this file is copy-pasted between several Embassy crates, because there is no
// straightforward way to share this code:
// - it cannot be placed into the root of the repo and linked from each build.rs using `#[path =
// "../build_common.rs"]`, because `cargo publish` requires that all files published with a crate
// reside in the crate's directory,
// - it cannot be symlinked from `embassy-xxx/build_common.rs` to `../build_common.rs`, because
// symlinks don't work on Windows.
use std::collections::HashSet;
use std::env;
use std::ffi::OsString;
use std::process::Command;
/// Helper for emitting cargo instructions for enabling configs (`cargo:rustc-cfg=X`) and declaring
/// them (`cargo:rustc-check-cfg=cfg(X)`).
#[derive(Debug)]
pub struct CfgSet {
enabled: HashSet<String>,
declared: HashSet<String>,
emit_declared: bool,
}
impl CfgSet {
pub fn new() -> Self {
Self {
enabled: HashSet::new(),
declared: HashSet::new(),
emit_declared: is_rustc_nightly(),
}
}
/// Enable a config, which can then be used in `#[cfg(...)]` for conditional compilation.
///
/// All configs that can potentially be enabled should be unconditionally declared using
/// [`Self::declare()`].
pub fn enable(&mut self, cfg: impl AsRef<str>) {
if self.enabled.insert(cfg.as_ref().to_owned()) {
println!("cargo:rustc-cfg={}", cfg.as_ref());
}
}
pub fn enable_all(&mut self, cfgs: &[impl AsRef<str>]) {
for cfg in cfgs.iter() {
self.enable(cfg.as_ref());
}
}
/// Declare a valid config for conditional compilation, without enabling it.
///
/// This enables rustc to check that the configs in `#[cfg(...)]` attributes are valid.
pub fn declare(&mut self, cfg: impl AsRef<str>) {
if self.declared.insert(cfg.as_ref().to_owned()) && self.emit_declared {
println!("cargo:rustc-check-cfg=cfg({})", cfg.as_ref());
}
}
pub fn declare_all(&mut self, cfgs: &[impl AsRef<str>]) {
for cfg in cfgs.iter() {
self.declare(cfg.as_ref());
}
}
pub fn set(&mut self, cfg: impl Into<String>, enable: bool) {
let cfg = cfg.into();
if enable {
self.enable(cfg.clone());
}
self.declare(cfg);
}
}
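// Illustration (hypothetical, not in this diff): a build script using the API above
// emits roughly the following cargo instructions. The cfg names are placeholders.
//
//     let mut cfgs = CfgSet::new();
//     cfgs.declare("example_cfg");  // cargo:rustc-check-cfg=cfg(example_cfg)
//                                   // (only on nightly or with EMBASSY_FORCE_CHECK_CFG set)
//     cfgs.enable("example_cfg");   // cargo:rustc-cfg=example_cfg
//     cfgs.set("other_cfg", false); // declared but not enabled, so #[cfg(other_cfg)] code
//                                   // is compiled out without an "unexpected cfg" warning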
fn is_rustc_nightly() -> bool {
if env::var_os("EMBASSY_FORCE_CHECK_CFG").is_some() {
return true;
}
let rustc = env::var_os("RUSTC").unwrap_or_else(|| OsString::from("rustc"));
let output = Command::new(rustc)
.arg("--version")
.output()
.expect("failed to run `rustc --version`");
String::from_utf8_lossy(&output.stdout).contains("nightly")
}
/// Sets configs that describe the target platform.
pub fn set_target_cfgs(cfgs: &mut CfgSet) {
let target = env::var("TARGET").unwrap();
if target.starts_with("thumbv6m-") {
cfgs.enable_all(&["cortex_m", "armv6m"]);
} else if target.starts_with("thumbv7m-") {
cfgs.enable_all(&["cortex_m", "armv7m"]);
} else if target.starts_with("thumbv7em-") {
cfgs.enable_all(&["cortex_m", "armv7m", "armv7em"]);
} else if target.starts_with("thumbv8m.base") {
cfgs.enable_all(&["cortex_m", "armv8m", "armv8m_base"]);
} else if target.starts_with("thumbv8m.main") {
cfgs.enable_all(&["cortex_m", "armv8m", "armv8m_main"]);
}
cfgs.declare_all(&[
"cortex_m",
"armv6m",
"armv7m",
"armv7em",
"armv8m",
"armv8m_base",
"armv8m_main",
]);
cfgs.set("has_fpu", target.ends_with("-eabihf"));
}
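These cfgs are then available to `#[cfg(...)]` in the crate source; this is the same mechanism the new `impls.rs` below uses to gate its thread-mode module behind `#[cfg(any(cortex_m, feature = "std"))]`. A minimal sketch of hypothetical downstream consumers (the cfg names come from the list above, the functions themselves are illustrative):

#[cfg(cortex_m)]
fn on_cortex_m_only() {
    // Compiled in on any of the thumbv6m/v7m/v7em/v8m targets matched above.
}

#[cfg(has_fpu)]
fn double(x: f32) -> f32 {
    // `has_fpu` is enabled for *-eabihf targets.
    x * 2.0
}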

scoped-mutex/src/impls.rs (new file, 178 lines)

@ -0,0 +1,178 @@
//! Mutex primitives.
//!
//! This module provides implementations of the [`RawMutex`] trait.
//!
//! [`RawMutex`]: crate::RawMutex
#![allow(clippy::new_without_default)]
#![allow(clippy::declare_interior_mutable_const)]
use core::marker::PhantomData;
use core::sync::atomic::{AtomicBool, Ordering};
use crate::{ConstInit, UnconstRawMutex};
#[cfg(feature = "impl-critical-section")]
pub mod cs {
use super::*;
/// A mutex that allows borrowing data across executors and interrupts.
///
/// # Safety
///
/// This mutex is safe to share between different executors and interrupts.
pub struct CriticalSectionRawMutex {
taken: AtomicBool,
}
unsafe impl Send for CriticalSectionRawMutex {}
unsafe impl Sync for CriticalSectionRawMutex {}
impl CriticalSectionRawMutex {
/// Create a new `CriticalSectionRawMutex`.
pub const fn new() -> Self {
Self {
taken: AtomicBool::new(false),
}
}
}
impl ConstInit for CriticalSectionRawMutex {
const INIT: Self = Self::new();
}
unsafe impl UnconstRawMutex for CriticalSectionRawMutex {
#[inline]
fn try_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
critical_section::with(|_| {
// NOTE: separated load/stores are acceptable as we are in
// a critical section
if self.taken.load(Ordering::Relaxed) {
return None;
}
self.taken.store(true, Ordering::Relaxed);
let ret = f();
self.taken.store(false, Ordering::Relaxed);
Some(ret)
})
}
}
}
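// Illustration (hypothetical, not in this diff): with the `impl-critical-section`
// feature enabled and a `critical_section` implementation linked in, the raw mutex
// above grants access only for the duration of the closure. The crate name
// `scoped_mutex` is assumed here.
//
//     use scoped_mutex::UnconstRawMutex;
//     use scoped_mutex::impls::cs::CriticalSectionRawMutex;
//
//     static RAW: CriticalSectionRawMutex = CriticalSectionRawMutex::new();
//
//     fn demo() {
//         // Runs inside critical_section::with(); a nested try_lock would return None.
//         assert_eq!(RAW.try_lock(|| 40 + 2), Some(42));
//     }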
// ================
pub mod local {
use super::*;
/// A mutex that allows borrowing data in local context.
///
/// This acts similarly to a `RefCell` with scoped access patterns, except that
/// the data cannot be borrowed a second time while it is already borrowed.
pub struct LocalRawMutex {
taken: AtomicBool,
/// Prevent this from being sync or send
_phantom: PhantomData<*mut ()>,
}
impl LocalRawMutex {
/// Create a new `LocalRawMutex`.
pub const fn new() -> Self {
Self {
taken: AtomicBool::new(false),
_phantom: PhantomData,
}
}
}
impl ConstInit for LocalRawMutex {
const INIT: Self = Self::new();
}
unsafe impl UnconstRawMutex for LocalRawMutex {
#[inline]
fn try_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
// NOTE: separated load/stores are acceptable as we are !Send and !Sync,
// meaning that we can only be accessed within a single thread
if self.taken.load(Ordering::Relaxed) {
return None;
}
self.taken.store(true, Ordering::Relaxed);
let ret = f();
self.taken.store(false, Ordering::Relaxed);
Some(ret)
}
}
}
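// Illustration (hypothetical, not in this diff; crate name `scoped_mutex` assumed):
// because try_lock() refuses a second borrow, a re-entrant attempt yields None
// rather than handing out overlapping access.
//
//     use scoped_mutex::UnconstRawMutex;
//     use scoped_mutex::impls::local::LocalRawMutex;
//
//     fn demo() {
//         let raw = LocalRawMutex::new();
//         let nested = raw.lock(|| raw.try_lock(|| ()));
//         assert!(nested.is_none()); // outer lock held, inner attempt is rejected
//     }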
// ================
#[cfg(any(cortex_m, feature = "std"))]
pub mod thread_mode {
use super::*;
/// A "mutex" that only allows borrowing from thread mode.
///
/// # Safety
///
/// **This Mutex is only safe on single-core systems.**
///
/// On multi-core systems, a `ThreadModeRawMutex` **is not sufficient** to ensure exclusive access.
pub struct ThreadModeRawMutex {
taken: AtomicBool,
}
unsafe impl Send for ThreadModeRawMutex {}
unsafe impl Sync for ThreadModeRawMutex {}
impl ThreadModeRawMutex {
/// Create a new `ThreadModeRawMutex`.
pub const fn new() -> Self {
Self {
taken: AtomicBool::new(false),
}
}
}
impl ConstInit for ThreadModeRawMutex {
const INIT: Self = Self::new();
}
unsafe impl UnconstRawMutex for ThreadModeRawMutex {
#[inline]
fn try_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
if !in_thread_mode() {
return None;
}
// NOTE: separated load/stores are acceptable as we checked we are only
// accessed from a single thread (checked above)
if self.taken.load(Ordering::Relaxed) {
return None;
}
self.taken.store(true, Ordering::Relaxed);
let ret = f();
self.taken.store(false, Ordering::Relaxed);
Some(ret)
}
}
impl Drop for ThreadModeRawMutex {
fn drop(&mut self) {
// Only allow dropping from thread mode. Dropping a mutex built on this type also drops
// the protected data, so `drop` needs the same guarantees as `lock`: a mutex using
// `ThreadModeRawMutex` may be Send even if the protected data isn't, so without this check
// a user could create it in thread mode, send it to interrupt context and drop it there,
// which would "send" the data even if it is not Send.
assert!(
in_thread_mode(),
"ThreadModeRawMutex can only be dropped from thread mode."
);
// Drop of the inner `T` happens after this.
}
}
pub(crate) fn in_thread_mode() -> bool {
#[cfg(feature = "std")]
return Some("main") == std::thread::current().name();
#[cfg(not(feature = "std"))]
// ICSR.VECTACTIVE == 0
return unsafe { (0xE000ED04 as *const u32).read_volatile() } & 0x1FF == 0;
}
}
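A rough way to exercise the thread-mode gate is a host build, assuming the crate is imported as `scoped_mutex`, exposes a `std` feature (implied by the `cfg(feature = "std")` branches above), and keeps this module public: with the std backend, "thread mode" is approximated as the thread named `main`, so locking succeeds there and is refused from other threads.

use scoped_mutex::UnconstRawMutex;
use scoped_mutex::impls::thread_mode::ThreadModeRawMutex;

static M: ThreadModeRawMutex = ThreadModeRawMutex::new();

fn demo() {
    // Called from the main thread, so the std backend reports "thread mode".
    assert_eq!(M.try_lock(|| 1), Some(1));

    // A spawned worker thread is not "thread mode": the lock is refused.
    let from_worker = std::thread::spawn(|| M.try_lock(|| 1)).join().unwrap();
    assert_eq!(from_worker, None);
}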


@ -1,57 +1,100 @@
#![no_std]
use core::cell::UnsafeCell;
pub mod impls;
/// Raw mutex trait.
///
/// This mutex is "raw", which means it does not actually contain the protected data, it
/// just implements the mutex mechanism. For most uses you should use [`super::Mutex`] instead,
/// which is generic over a RawMutex and contains the protected data.
///
/// Note that, unlike other mutexes, implementations only guarantee no
/// concurrent access from other threads: concurrent access from the current
/// thread is allowed. For example, it's possible to lock the same mutex multiple times reentrantly.
///
/// Therefore, locking a `RawMutex` is only enough to guarantee safe shared (`&`) access
/// to the data, it is not enough to guarantee exclusive (`&mut`) access.
/// just implements the mutex mechanism. For most uses you should use [`BlockingMutex`]
/// instead, which is generic over a RawMutex and contains the protected data.
///
/// # Safety
///
/// RawMutex implementations must ensure that, while locked, no other thread can lock
/// the RawMutex concurrently. This can usually be implemented using an [`AtomicBool`]
/// to track the "taken" state. See [crate::impls] for examples of correct implementations.
///
/// [`AtomicBool`]: core::sync::atomic::AtomicBool
///
/// Unsafe code is allowed to rely on this fact, so incorrect implementations will cause undefined behavior.
///
/// # Implementation Note:
///
/// This is actually a marker trait for types that implement [`UnconstRawMutex`] and
/// [`ConstInit`]. This is to allow cases where a mutex cannot be created in const
/// context, for example some runtime/OS mutexes, as well as testing mutexes like
/// those from `loom`.
///
/// If you are implementing your own RawMutex primitive, you should implement the
/// [`UnconstRawMutex`] and [`ConstInit`] traits, and rely on the blanket impl
/// of `impl<T: UnconstRawMutex + ConstInit> RawMutex for T {}`.
pub trait RawMutex: UnconstRawMutex + ConstInit {}
impl<T: UnconstRawMutex + ConstInit> RawMutex for T {}
pub trait ConstInit {
/// Create a new instance.
///
/// This is a const instead of a method to allow creating instances in const context.
const INIT: Self;
}
/// Raw mutex trait.
///
/// This mutex is "raw", which means it does not actually contain the protected data, it
/// just implements the mutex mechanism. For most uses you should use [`BlockingMutex`]
/// instead, which is generic over an `UnconstRawMutex` and contains the protected data.
///
/// # Safety
///
/// UnconstRawMutex implementations must ensure that, while locked, no other thread can lock
/// the RawMutex concurrently. This can usually be implemented using an [`AtomicBool`]
/// to track the "taken" state. See [crate::impls] for examples of correct implementations.
///
/// Unsafe code is allowed to rely on this fact, so incorrect implementations will cause undefined behavior.
///
/// [`AtomicBool`]: core::sync::atomic::AtomicBool
pub unsafe trait UnconstRawMutex {
/// Attempt to lock this mutex, calling `f()` after the lock has been acquired, and
/// releasing the lock after the completion of `f()`.
///
/// If this was successful, `Some(R)` will be returned. If the mutex was already locked,
/// `None` will be returned.
fn try_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R>;
/// Lock this mutex, calling `f()` after the lock has been acquired, and releasing
/// the lock after the completion of `f()`.
///
/// Panics if the mutex is already locked.
fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
self.try_lock(f).expect("Attempted to take lock re-entrantly")
}
}
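// Illustration (hypothetical, not in this diff): per the Implementation Note above,
// a mutex that cannot be built in const context only implements UnconstRawMutex,
// skipping ConstInit and therefore the blanket RawMutex impl. The type below is
// made up for the example and assumes a std environment.
//
//     pub struct StdRawMutex {
//         inner: std::sync::Mutex<()>,
//     }
//
//     unsafe impl UnconstRawMutex for StdRawMutex {
//         fn try_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
//             // std's try_lock fails if the lock is already held (or poisoned),
//             // which maps directly onto the Option-returning contract above.
//             let _guard = self.inner.try_lock().ok()?;
//             Some(f())
//         }
//     }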
/// Blocking mutex (not async)
///
/// Provides a blocking mutual exclusion primitive backed by an implementation of [`RawMutex`].
///
/// Which implementation you select depends on the context in which you're using the mutex, and you can choose which kind
/// of interior mutability fits your use case.
///
/// Use [`CriticalSectionRawMutex`] when data can be shared between threads and interrupts.
///
/// Use [`LocalRawMutex`] when data is only shared between tasks running on the same executor.
///
/// Use [`ThreadModeRawMutex`] when data is shared between tasks running on the same executor but you want a global singleton.
///
/// In all cases, the blocking mutex is intended to be short lived and not held across await points.
/// Use the async [`Mutex`](crate::mutex::Mutex) if you need a lock that is held across await points.
///
/// [`CriticalSectionRawMutex`]: crate::impls::cs::CriticalSectionRawMutex
/// [`LocalRawMutex`]: crate::impls::local::LocalRawMutex
/// [`ThreadModeRawMutex`]: crate::impls::thread_mode::ThreadModeRawMutex
pub struct BlockingMutex<R, T: ?Sized> {
// NOTE: `raw` must be FIRST, so when using ThreadModeMutex the "can't drop in non-thread-mode" gets
// to run BEFORE dropping `data`.
raw: R,
data: UnsafeCell<T>,
}
@ -64,7 +107,6 @@ impl<R: RawMutex, T> BlockingMutex<R, T> {
pub const fn new(val: T) -> BlockingMutex<R, T> {
BlockingMutex {
raw: R::INIT,
data: UnsafeCell::new(val),
}
}
@ -72,16 +114,10 @@ impl<R: RawMutex, T> BlockingMutex<R, T> {
/// Creates a critical section and grants temporary access to the protected data.
pub fn lock<U>(&self, f: impl FnOnce(&mut T) -> U) -> U {
self.raw.lock(|| {
let ptr = self.data.get();
// SAFETY: Raw Mutex proves we have exclusive access to the inner data
let inner = unsafe { &mut *ptr };
f(inner)
})
}
}
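For orientation, a minimal usage sketch of the pieces above; the crate name `scoped_mutex` and the `demo` function are assumed for illustration, everything else comes from this diff:

use scoped_mutex::BlockingMutex;
use scoped_mutex::impls::local::LocalRawMutex;

fn demo() {
    // LocalRawMutex implements UnconstRawMutex + ConstInit, so the blanket impl
    // above makes it a RawMutex, and `BlockingMutex::new` accepts it.
    let counter: BlockingMutex<LocalRawMutex, u32> = BlockingMutex::new(0);

    counter.lock(|c| *c += 1);
    assert_eq!(counter.lock(|c| *c), 1);
}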
@ -94,7 +130,6 @@ impl<R, T> BlockingMutex<R, T> {
pub const fn const_new(raw_mutex: R, val: T) -> BlockingMutex<R, T> {
BlockingMutex {
raw: raw_mutex,
data: UnsafeCell::new(val),
}
}