Rollup merge of #79039 - thomcc:weakly-relaxing, r=Amanieu

Tighten the bounds on atomic Ordering in std::sys::unix::weak::Weak

This changes the lookup from multiple `SeqCst` loads to a single `Relaxed` load, plus an `Acquire` fence taken only when we are actually going to use the loaded pointer.
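
In sketch form (simplified; the `CACHE` static, `get` function, and sentinel values here are illustrative stand-ins for `Weak`'s internals, not the actual patch):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

// 1 = not yet looked up, 0 = symbol absent, anything else = symbol address.
static CACHE: AtomicUsize = AtomicUsize::new(1);

fn get() -> Option<usize> {
    // A single Relaxed load replaces the old SeqCst load(s).
    match CACHE.load(Ordering::Relaxed) {
        1 => None, // slow path: do the lookup and publish it with Release
        0 => None, // lookup already failed; the symbol doesn't exist
        addr => {
            // Pay for the Acquire fence only when the address is used.
            fence(Ordering::Acquire);
            Some(addr)
        }
    }
}
```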

I'd love to avoid the Acquire fence as well, but doing so would need `Ordering::Consume`, which neither Rust nor LLVM supports (a shame, since the fence is hardly free on ARM, which is the platform I was hoping to improve).

r? ``@Amanieu`` (Sorry for always picking you, but I know a lot of people wouldn't feel comfortable reviewing atomic ordering changes)
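
For context: `Weak` caches the result of a `dlsym` lookup, with 1 meaning "not yet looked up" and 0 meaning "symbol absent". A standalone sketch of that lookup, assuming the `libc` crate (the real `fetch` in this file, truncated at the end of the diff below, does the equivalent):

```rust
use std::ffi::CString;

// Resolve a symbol that may not exist in the current process; a null
// result (0) doubles as the "absent" sentinel stored in the cache.
unsafe fn fetch(name: &str) -> usize {
    let name = match CString::new(name) {
        Ok(name) => name,
        Err(_) => return 0,
    };
    libc::dlsym(libc::RTLD_DEFAULT, name.as_ptr()) as usize
}
```
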
commit ad6fd9b037
Mara Bos, 2020-11-18 15:46:27 +01:00, committed by GitHub

@@ -24,7 +24,7 @@
 use crate::ffi::CStr;
 use crate::marker;
 use crate::mem;
-use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::sync::atomic::{self, AtomicUsize, Ordering};
 
 macro_rules! weak {
     (fn $name:ident($($t:ty),*) -> $ret:ty) => (
@@ -47,15 +47,49 @@ impl<F> Weak<F> {
     pub fn get(&self) -> Option<F> {
         assert_eq!(mem::size_of::<F>(), mem::size_of::<usize>());
         unsafe {
-            if self.addr.load(Ordering::SeqCst) == 1 {
-                self.addr.store(fetch(self.name), Ordering::SeqCst);
-            }
-            match self.addr.load(Ordering::SeqCst) {
+            // Relaxed is fine here because we fence before reading through the
+            // pointer (see the comment below).
+            match self.addr.load(Ordering::Relaxed) {
+                1 => self.initialize(),
                 0 => None,
-                addr => Some(mem::transmute_copy::<usize, F>(&addr)),
+                addr => {
+                    let func = mem::transmute_copy::<usize, F>(&addr);
+                    // The caller is presumably going to read through this value
+                    // (by calling the function we've dlsymed). This means we'd
+                    // need to have loaded it with at least C11's consume
+                    // ordering in order to be guaranteed that the data we read
+                    // from the pointer isn't from before the pointer was
+                    // stored. Rust has no equivalent to memory_order_consume,
+                    // so we use an acquire fence (sorry, ARM).
+                    //
+                    // Now, in practice this likely isn't needed even on CPUs
+                    // where relaxed and consume mean different things. The
+                    // symbols we're loading are probably present (or not) at
+                    // init, and even if they aren't, the runtime dynamic loader
+                    // is extremely likely to have sufficient barriers internally
+                    // (possibly implicitly, for example the ones provided by
+                    // invoking `mprotect`).
+                    //
+                    // That said, none of that's *guaranteed*, and so we fence.
+                    atomic::fence(Ordering::Acquire);
+                    Some(func)
+                }
             }
         }
     }
+
+    // Cold because it should only happen during first-time initialization.
+    #[cold]
+    unsafe fn initialize(&self) -> Option<F> {
+        let val = fetch(self.name);
+        // This synchronizes with the acquire fence in `get`.
+        self.addr.store(val, Ordering::Release);
+
+        match val {
+            0 => None,
+            addr => Some(mem::transmute_copy::<usize, F>(&addr)),
+        }
+    }
 }
 
 unsafe fn fetch(name: &str) -> usize {
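
Why the pairing in the patch is sound: a release store synchronizes with an acquire *fence* in another thread whenever a load sequenced before the fence (of any ordering, including Relaxed) reads the stored value. A minimal, self-contained demonstration of that rule (ordinary user code, not std internals):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};
use std::thread;

static DATA: AtomicUsize = AtomicUsize::new(0);
static READY: AtomicUsize = AtomicUsize::new(0);

fn main() {
    let writer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed);
        // Publish: plays the role of the Release store in `initialize`.
        READY.store(1, Ordering::Release);
    });
    let reader = thread::spawn(|| {
        // Relaxed load, like the one at the top of `get`.
        while READY.load(Ordering::Relaxed) == 0 {
            std::hint::spin_loop();
        }
        // The Acquire fence upgrades the Relaxed load after the fact:
        // everything written before the Release store is now visible.
        fence(Ordering::Acquire);
        assert_eq!(DATA.load(Ordering::Relaxed), 42);
    });
    writer.join().unwrap();
    reader.join().unwrap();
}
```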