Rollup merge of #123811 - joboet:queue_em_up, r=ChrisDenton

Use queue-based `RwLock` on more platforms

This switches over Windows 7, SGX and Xous to the queue-based `RwLock` implementation added in #110211, thereby fixing #121949 for Windows 7 and partially resolving #114581 on SGX. TEEOS can't currently be switched because it doesn't have a good thread parking implementation.
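For anyone unfamiliar with the queue-based implementation: it blocks lock waiters by parking their threads and wakes them by unparking, which is exactly the primitive TEEOS lacks. A minimal standalone sketch of that park/unpark protocol (illustrative only, not code from this PR):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;
use std::time::Duration;

fn main() {
    let ready = Arc::new(AtomicBool::new(false));
    let waiter = {
        let ready = Arc::clone(&ready);
        thread::spawn(move || {
            // A blocked acquirer parks until the current lock holder wakes it.
            // park() may return spuriously, so the condition is rechecked in a loop.
            while !ready.load(Ordering::Acquire) {
                thread::park();
            }
        })
    };
    thread::sleep(Duration::from_millis(10)); // give the waiter time to park
    ready.store(true, Ordering::Release); // "hand over the lock"
    waiter.thread().unpark(); // wake exactly this waiter
    waiter.join().unwrap();
}
```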

CC `@roblabla` `@raoulstrackx` `@xobs` Could you help me test this, please?
r? `@ChrisDenton` the Windows stuff should be familiar to you
Commit 1176134114 by Guillaume Gomez, 2024-04-16 21:41:25 +02:00 (committed by GitHub)
8 changed files with 58 additions and 387 deletions


@@ -0,0 +1,46 @@
//! The functions in this module are needed by libunwind. These symbols are named
//! in pre-link args for the target specification, so keep that in sync.

#![cfg(not(test))]

use crate::sys::sync::RwLock;

// Verify that the byte pattern libunwind uses to initialize an RwLock is
// equivalent to the value of RwLock::new(). If the value changes,
// `src/UnwindRustSgx.h` in libunwind needs to be changed too.
const _: () = unsafe {
    let bits_rust: usize = crate::mem::transmute(RwLock::new());
    assert!(bits_rust == 0);
};

const EINVAL: i32 = 22;

#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }

    // We cannot differentiate between reads and writes in unlock and therefore
    // always use a write-lock. Unwinding isn't really in the hot path anyway.
    unsafe { (*p).write() };
    return 0;
}

#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }

    unsafe { (*p).write() };
    return 0;
}

#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }

    unsafe { (*p).write_unlock() };
    return 0;
}
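The `const _: () = …` block above is a compile-time ABI check: if `RwLock::new()` ever stops being all-zero bytes, building std for SGX fails instead of silently diverging from libunwind. The same pattern can pin size and alignment against a C-compatible type, as the deleted `sgx.rs` below does; a standalone sketch with a hypothetical `FfiHandle` type:

```rust
use std::alloc::Layout;

// Hypothetical FFI-visible type; stands in for RwLock here.
#[repr(transparent)]
struct FfiHandle(*mut ());

// Evaluated at compile time: a mismatch is a build error, not a runtime bug.
const _: () = {
    let rust = Layout::new::<FfiHandle>();
    let c = Layout::new::<*mut ()>();
    assert!(rust.size() == c.size());
    assert!(rust.align() == c.align());
};

fn main() {}
```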


@@ -17,6 +17,7 @@ pub mod fd;
 pub mod fs;
 #[path = "../unsupported/io.rs"]
 pub mod io;
+mod libunwind_integration;
 pub mod net;
 pub mod os;
 #[path = "../unsupported/pipe.rs"]


@@ -52,10 +52,6 @@ impl<T> WaitVariable<T> {
         WaitVariable { queue: WaitQueue::new(), lock: var }
     }
 
-    pub fn queue_empty(&self) -> bool {
-        self.queue.is_empty()
-    }
-
     pub fn lock_var(&self) -> &T {
         &self.lock
     }
@@ -68,7 +64,7 @@ impl<T> WaitVariable<T> {
 #[derive(Copy, Clone)]
 pub enum NotifiedTcs {
     Single(Tcs),
-    All { count: NonZero<usize> },
+    All { _count: NonZero<usize> },
 }
 
 /// An RAII guard that will notify a set of target threads as well as unlock
@@ -98,19 +94,6 @@ impl Default for WaitQueue {
     }
 }
 
-impl<'a, T> WaitGuard<'a, T> {
-    /// Returns which TCSes will be notified when this guard drops.
-    pub fn notified_tcs(&self) -> NotifiedTcs {
-        self.notified_tcs
-    }
-
-    /// Drop this `WaitGuard`, after dropping another `guard`.
-    pub fn drop_after<U>(self, guard: U) {
-        drop(guard);
-        drop(self);
-    }
-}
-
 impl<'a, T> Deref for WaitGuard<'a, T> {
     type Target = SpinMutexGuard<'a, WaitVariable<T>>;
@@ -141,10 +124,6 @@ impl WaitQueue {
         WaitQueue { inner: UnsafeList::new() }
     }
 
-    pub fn is_empty(&self) -> bool {
-        self.inner.is_empty()
-    }
-
     /// Adds the calling thread to the `WaitVariable`'s wait queue, then wait
     /// until a wakeup event.
     ///
@@ -253,7 +232,10 @@
         }
 
         if let Some(count) = NonZero::new(count) {
-            Ok(WaitGuard { mutex_guard: Some(guard), notified_tcs: NotifiedTcs::All { count } })
+            Ok(WaitGuard {
+                mutex_guard: Some(guard),
+                notified_tcs: NotifiedTcs::All { _count: count },
+            })
         } else {
             Err(guard)
         }
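The `count` → `_count` rename goes with removing the `notified_tcs` getter above: the field is no longer read anywhere, and a leading underscore exempts it from rustc's `dead_code` lint while keeping the value around. A tiny illustration (hypothetical struct, not from this PR):

```rust
struct Notified {
    // Never read: if this were named `count`, rustc would warn
    // "field is never read"; the underscore prefix opts it out
    // of the dead_code lint.
    _count: usize,
}

fn main() {
    let _n = Notified { _count: 3 };
}
```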


@@ -12,24 +12,20 @@ cfg_if::cfg_if! {
     ))] {
         mod futex;
         pub use futex::RwLock;
-    } else if #[cfg(target_family = "unix")] {
+    } else if #[cfg(any(
+        target_family = "unix",
+        all(target_os = "windows", target_vendor = "win7"),
+        all(target_vendor = "fortanix", target_env = "sgx"),
+        target_os = "xous",
+    ))] {
         mod queue;
         pub use queue::RwLock;
-    } else if #[cfg(all(target_os = "windows", target_vendor = "win7"))] {
-        mod windows7;
-        pub use windows7::RwLock;
-    } else if #[cfg(all(target_vendor = "fortanix", target_env = "sgx"))] {
-        mod sgx;
-        pub use sgx::RwLock;
     } else if #[cfg(target_os = "solid_asp3")] {
         mod solid;
         pub use solid::RwLock;
     } else if #[cfg(target_os = "teeos")] {
         mod teeos;
         pub use teeos::RwLock;
-    } else if #[cfg(target_os = "xous")] {
-        mod xous;
-        pub use xous::RwLock;
     } else {
         mod no_threads;
         pub use no_threads::RwLock;
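The net effect is that four `cfg_if!` branches collapse into one, and everything downstream keeps using `crate::sys::sync::RwLock` unchanged. A standalone sketch of the same dispatch pattern, using the external `cfg-if` crate (branch bodies are placeholders, not the real backends):

```rust
cfg_if::cfg_if! {
    if #[cfg(any(target_family = "unix", target_family = "windows"))] {
        // Selected at compile time; callers never see the cfg.
        fn backend() -> &'static str { "queue" }
    } else {
        fn backend() -> &'static str { "no_threads" }
    }
}

fn main() {
    println!("selected RwLock backend: {}", backend());
}
```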


@@ -1,219 +0,0 @@
#[cfg(test)]
mod tests;

use crate::alloc::Layout;
use crate::num::NonZero;
use crate::sys::pal::waitqueue::{
    try_lock_or_false, NotifiedTcs, SpinMutex, SpinMutexGuard, WaitQueue, WaitVariable,
};
use crate::sys_common::lazy_box::{LazyBox, LazyInit};

struct AllocatedRwLock {
    readers: SpinMutex<WaitVariable<Option<NonZero<usize>>>>,
    writer: SpinMutex<WaitVariable<bool>>,
}

pub struct RwLock {
    inner: LazyBox<AllocatedRwLock>,
}

impl LazyInit for AllocatedRwLock {
    fn init() -> Box<Self> {
        Box::new(AllocatedRwLock {
            readers: SpinMutex::new(WaitVariable::new(None)),
            writer: SpinMutex::new(WaitVariable::new(false)),
        })
    }
}

// Check at compile time that RwLock's size and alignment matches the C definition
// in libunwind (see also `test_c_rwlock_initializer` in `tests`).
const _: () = {
    let rust = Layout::new::<RwLock>();
    let c = Layout::new::<*mut ()>();
    assert!(rust.size() == c.size());
    assert!(rust.align() == c.align());
};

impl RwLock {
    pub const fn new() -> RwLock {
        RwLock { inner: LazyBox::new() }
    }

    #[inline]
    pub fn read(&self) {
        let lock = &*self.inner;
        let mut rguard = lock.readers.lock();
        let wguard = lock.writer.lock();
        if *wguard.lock_var() || !wguard.queue_empty() {
            // Another thread has or is waiting for the write lock, wait
            drop(wguard);
            WaitQueue::wait(rguard, || {});
            // Another thread has passed the lock to us
        } else {
            // No waiting writers, acquire the read lock
            *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
        }
    }

    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        let lock = &*self.inner;
        let mut rguard = try_lock_or_false!(lock.readers);
        let wguard = try_lock_or_false!(lock.writer);
        if *wguard.lock_var() || !wguard.queue_empty() {
            // Another thread has or is waiting for the write lock
            false
        } else {
            // No waiting writers, acquire the read lock
            *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().map_or(0, |n| n.get()) + 1);
            true
        }
    }

    #[inline]
    pub fn write(&self) {
        let lock = &*self.inner;
        let rguard = lock.readers.lock();
        let mut wguard = lock.writer.lock();
        if *wguard.lock_var() || rguard.lock_var().is_some() {
            // Another thread has the lock, wait
            drop(rguard);
            WaitQueue::wait(wguard, || {});
            // Another thread has passed the lock to us
        } else {
            // We are just now obtaining the lock
            *wguard.lock_var_mut() = true;
        }
    }

    #[inline]
    pub fn try_write(&self) -> bool {
        let lock = &*self.inner;
        let rguard = try_lock_or_false!(lock.readers);
        let mut wguard = try_lock_or_false!(lock.writer);
        if *wguard.lock_var() || rguard.lock_var().is_some() {
            // Another thread has the lock
            false
        } else {
            // We are just now obtaining the lock
            *wguard.lock_var_mut() = true;
            true
        }
    }

    #[inline]
    unsafe fn __read_unlock(
        &self,
        mut rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZero<usize>>>>,
        wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
    ) {
        *rguard.lock_var_mut() = NonZero::new(rguard.lock_var().unwrap().get() - 1);
        if rguard.lock_var().is_some() {
            // There are other active readers
        } else {
            if let Ok(mut wguard) = WaitQueue::notify_one(wguard) {
                // A writer was waiting, pass the lock
                *wguard.lock_var_mut() = true;
                wguard.drop_after(rguard);
            } else {
                // No writers were waiting, the lock is released
                rtassert!(rguard.queue_empty());
            }
        }
    }

    #[inline]
    pub unsafe fn read_unlock(&self) {
        let lock = &*self.inner;
        let rguard = lock.readers.lock();
        let wguard = lock.writer.lock();
        unsafe { self.__read_unlock(rguard, wguard) };
    }

    #[inline]
    unsafe fn __write_unlock(
        &self,
        rguard: SpinMutexGuard<'_, WaitVariable<Option<NonZero<usize>>>>,
        wguard: SpinMutexGuard<'_, WaitVariable<bool>>,
    ) {
        match WaitQueue::notify_one(wguard) {
            Err(mut wguard) => {
                // No writers waiting, release the write lock
                *wguard.lock_var_mut() = false;
                if let Ok(mut rguard) = WaitQueue::notify_all(rguard) {
                    // One or more readers were waiting, pass the lock to them
                    if let NotifiedTcs::All { count } = rguard.notified_tcs() {
                        *rguard.lock_var_mut() = Some(count)
                    } else {
                        unreachable!() // called notify_all
                    }
                    rguard.drop_after(wguard);
                } else {
                    // No readers waiting, the lock is released
                }
            }
            Ok(wguard) => {
                // There was a thread waiting for write, just pass the lock
                wguard.drop_after(rguard);
            }
        }
    }

    #[inline]
    pub unsafe fn write_unlock(&self) {
        let lock = &*self.inner;
        let rguard = lock.readers.lock();
        let wguard = lock.writer.lock();
        unsafe { self.__write_unlock(rguard, wguard) };
    }

    // only used by __rust_rwlock_unlock below
    #[inline]
    #[cfg_attr(test, allow(dead_code))]
    unsafe fn unlock(&self) {
        let lock = &*self.inner;
        let rguard = lock.readers.lock();
        let wguard = lock.writer.lock();
        if *wguard.lock_var() == true {
            unsafe { self.__write_unlock(rguard, wguard) };
        } else {
            unsafe { self.__read_unlock(rguard, wguard) };
        }
    }
}

// The following functions are needed by libunwind. These symbols are named
// in pre-link args for the target specification, so keep that in sync.
#[cfg(not(test))]
const EINVAL: i32 = 22;

#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_rdlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }
    unsafe { (*p).read() };
    return 0;
}

#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_wrlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }
    unsafe { (*p).write() };
    return 0;
}

#[cfg(not(test))]
#[no_mangle]
pub unsafe extern "C" fn __rust_rwlock_unlock(p: *mut RwLock) -> i32 {
    if p.is_null() {
        return EINVAL;
    }
    unsafe { (*p).unlock() };
    return 0;
}


@@ -1,21 +0,0 @@
use super::*;
use crate::ptr;

// Verify that the byte pattern libunwind uses to initialize an RwLock is
// equivalent to the value of RwLock::new(). If the value changes,
// `src/UnwindRustSgx.h` in libunwind needs to be changed too.
#[test]
fn test_c_rwlock_initializer() {
    const C_RWLOCK_INIT: *mut () = ptr::null_mut();

    // For the test to work, we need the padding/unused bytes in RwLock to be
    // initialized as 0. In practice, this is the case with statics.
    static RUST_RWLOCK_INIT: RwLock = RwLock::new();

    unsafe {
        // If the assertion fails, that's not necessarily an issue with the value
        // of C_RWLOCK_INIT. It might just be an issue with the way padding
        // bytes are initialized in the test code.
        assert_eq!(crate::mem::transmute_copy::<_, *mut ()>(&RUST_RWLOCK_INIT), C_RWLOCK_INIT);
    };
}


@@ -1,40 +0,0 @@
use crate::cell::UnsafeCell;
use crate::sys::c;

pub struct RwLock {
    inner: UnsafeCell<c::SRWLOCK>,
}

unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}

impl RwLock {
    #[inline]
    pub const fn new() -> RwLock {
        RwLock { inner: UnsafeCell::new(c::SRWLOCK_INIT) }
    }
    #[inline]
    pub fn read(&self) {
        unsafe { c::AcquireSRWLockShared(self.inner.get()) }
    }
    #[inline]
    pub fn try_read(&self) -> bool {
        unsafe { c::TryAcquireSRWLockShared(self.inner.get()) != 0 }
    }
    #[inline]
    pub fn write(&self) {
        unsafe { c::AcquireSRWLockExclusive(self.inner.get()) }
    }
    #[inline]
    pub fn try_write(&self) -> bool {
        unsafe { c::TryAcquireSRWLockExclusive(self.inner.get()) != 0 }
    }
    #[inline]
    pub unsafe fn read_unlock(&self) {
        c::ReleaseSRWLockShared(self.inner.get())
    }
    #[inline]
    pub unsafe fn write_unlock(&self) {
        c::ReleaseSRWLockExclusive(self.inner.get())
    }
}


@@ -1,74 +0,0 @@
use crate::sync::atomic::{AtomicIsize, Ordering::Acquire};
use crate::thread::yield_now;

pub struct RwLock {
    /// The "mode" value indicates how many threads are waiting on this
    /// Mutex. Possible values are:
    ///     -1: The lock is locked for writing
    ///      0: The lock is unlocked
    ///   >=1: The lock is locked for reading
    ///
    /// This currently spins waiting for the lock to be freed. An
    /// optimization would be to involve the ticktimer server to
    /// coordinate unlocks.
    mode: AtomicIsize,
}

const RWLOCK_WRITING: isize = -1;
const RWLOCK_FREE: isize = 0;

unsafe impl Send for RwLock {}
unsafe impl Sync for RwLock {}

impl RwLock {
    #[inline]
    #[rustc_const_stable(feature = "const_locks", since = "1.63.0")]
    pub const fn new() -> RwLock {
        RwLock { mode: AtomicIsize::new(RWLOCK_FREE) }
    }

    #[inline]
    pub unsafe fn read(&self) {
        while !unsafe { self.try_read() } {
            yield_now();
        }
    }

    #[inline]
    pub unsafe fn try_read(&self) -> bool {
        self.mode
            .fetch_update(
                Acquire,
                Acquire,
                |v| if v == RWLOCK_WRITING { None } else { Some(v + 1) },
            )
            .is_ok()
    }

    #[inline]
    pub unsafe fn write(&self) {
        while !unsafe { self.try_write() } {
            yield_now();
        }
    }

    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        self.mode.compare_exchange(RWLOCK_FREE, RWLOCK_WRITING, Acquire, Acquire).is_ok()
    }

    #[inline]
    pub unsafe fn read_unlock(&self) {
        let previous = self.mode.fetch_sub(1, Acquire);
        assert!(previous != RWLOCK_FREE);
        assert!(previous != RWLOCK_WRITING);
    }

    #[inline]
    pub unsafe fn write_unlock(&self) {
        assert_eq!(
            self.mode.compare_exchange(RWLOCK_WRITING, RWLOCK_FREE, Acquire, Acquire),
            Ok(RWLOCK_WRITING)
        );
    }
}
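For contrast with the queue-based replacement: the deleted Xous lock above never sleeps, it just retries with `yield_now()`, which is why it benefits from switching to the parking implementation. Its whole state machine is the `mode` counter (-1 writer, 0 free, >=1 reader count); a runnable illustration of those transitions (standalone sketch, not PR code):

```rust
use std::sync::atomic::{AtomicIsize, Ordering::Acquire};

const WRITING: isize = -1;
const FREE: isize = 0;

fn main() {
    let mode = AtomicIsize::new(FREE);

    // Two readers acquire: 0 -> 1 -> 2 (fails only while a writer holds the lock).
    for _ in 0..2 {
        mode.fetch_update(Acquire, Acquire, |v| if v == WRITING { None } else { Some(v + 1) })
            .unwrap();
    }
    assert_eq!(mode.load(Acquire), 2);

    // A writer can only take the lock from the FREE state.
    assert!(mode.compare_exchange(FREE, WRITING, Acquire, Acquire).is_err());

    // Readers release: 2 -> 1 -> 0, after which the writer succeeds.
    mode.fetch_sub(1, Acquire);
    mode.fetch_sub(1, Acquire);
    assert!(mode.compare_exchange(FREE, WRITING, Acquire, Acquire).is_ok());
}
```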