Auto merge of #77611 - oli-obk:atomic_miri_leakage, r=nagisa
Directly use raw pointers in `AtomicPtr` store/load. I was unable to find any reason for this limitation in the latest LLVM sources or in the documentation [here](http://llvm.org/docs/Atomics.html#libcalls-atomic). Fixes https://github.com/rust-lang/miri/issues/1574.
This commit is contained in:
commit f0f68778f7
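For context, the Miri issue arises because `AtomicPtr` previously round-tripped its value through `usize`, so Miri could no longer trace the allocation behind the stored pointer. A minimal sketch of the affected pattern (illustrative only, not taken from the PR or the issue):

use std::sync::atomic::{AtomicPtr, Ordering};

static HEAD: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());

fn main() {
    // Stash a heap allocation in a static; the program intentionally never
    // frees it. With the old `as usize` round-trip inside `store`, Miri's
    // leak checker could not follow the pointer from `HEAD` and reported the
    // allocation as leaked; storing the raw pointer directly keeps it
    // visibly reachable.
    let p = Box::into_raw(Box::new(42u8));
    HEAD.store(p, Ordering::SeqCst);
}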
@@ -146,12 +146,12 @@ macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $
 macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
     match $ty.kind() {
-        ty::Uint(_) | ty::Int(_) => {}
+        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
         _ => {
             $fx.tcx.sess.span_err(
                 $span,
                 &format!(
-                    "`{}` intrinsic: expected basic integer type, found `{:?}`",
+                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
                     $intrinsic, $ty
                 ),
             );
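The cranelift-side change above only widens a type check. A standalone sketch of the same logic, using a toy type enum rather than rustc's real `ty::TyKind` (the enum and its variants are hypothetical stand-ins):

// Toy stand-in for rustc's type kinds, for illustration only.
#[allow(dead_code)]
#[derive(Debug)]
enum TyKind {
    Uint(u8),
    Int(u8),
    RawPtr(&'static str),
    Float(u8),
}

// Mirrors `validate_atomic_type`: integers were already accepted; raw
// pointers are now accepted as well, and everything else is an error.
fn validate_atomic_type(intrinsic: &str, ty: &TyKind) -> Result<(), String> {
    match ty {
        TyKind::Uint(_) | TyKind::Int(_) | TyKind::RawPtr(_) => Ok(()),
        _ => Err(format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        )),
    }
}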
@@ -437,16 +437,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match split[1] {
             "cxchg" | "cxchgweak" => {
                 let ty = substs.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let weak = split[1] == "cxchgweak";
-                    let pair = bx.atomic_cmpxchg(
-                        args[0].immediate(),
-                        args[1].immediate(),
-                        args[2].immediate(),
-                        order,
-                        failorder,
-                        weak,
-                    );
+                    let mut dst = args[0].immediate();
+                    let mut cmp = args[1].immediate();
+                    let mut src = args[2].immediate();
+                    if ty.is_unsafe_ptr() {
+                        // Some platforms do not support atomic operations on pointers,
+                        // so we cast to integer first.
+                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                        dst = bx.pointercast(dst, ptr_llty);
+                        cmp = bx.ptrtoint(cmp, bx.type_isize());
+                        src = bx.ptrtoint(src, bx.type_isize());
+                    }
+                    let pair = bx.atomic_cmpxchg(dst, cmp, src, order, failorder, weak);
                     let val = bx.extract_value(pair, 0);
                     let success = bx.extract_value(pair, 1);
                     let val = bx.from_immediate(val);
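At the Rust level, the lowering above behaves like a compare-exchange on an `AtomicUsize` with pointer-to-integer casts on both sides. A sketch of that equivalence (illustrative; the real work happens in codegen, and these source-level casts are exactly what this PR lets library code avoid):

use std::sync::atomic::{AtomicUsize, Ordering};

// Integer-based compare-exchange over pointer values, as the backend emits
// it for targets without pointer-typed cmpxchg.
fn cmpxchg_ptr<T>(
    slot: &AtomicUsize,
    current: *mut T,
    new: *mut T,
) -> Result<*mut T, *mut T> {
    slot.compare_exchange(current as usize, new as usize, Ordering::SeqCst, Ordering::SeqCst)
        .map(|old| old as *mut T)
        .map_err(|old| old as *mut T)
}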
@@ -464,9 +468,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

             "load" => {
                 let ty = substs.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() {
-                    let size = bx.layout_of(ty).size;
-                    bx.atomic_load(args[0].immediate(), order, size)
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+                    let layout = bx.layout_of(ty);
+                    let size = layout.size;
+                    let mut source = args[0].immediate();
+                    if ty.is_unsafe_ptr() {
+                        // Some platforms do not support atomic operations on pointers,
+                        // so we cast to integer first...
+                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                        source = bx.pointercast(source, ptr_llty);
+                    }
+                    let result = bx.atomic_load(source, order, size);
+                    if ty.is_unsafe_ptr() {
+                        // ... and then cast the result back to a pointer
+                        bx.inttoptr(result, bx.backend_type(layout))
+                    } else {
+                        result
+                    }
                 } else {
                     return invalid_monomorphization(ty);
                 }
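The `load` arm follows the same shape: when the type is a raw pointer, load through an integer-typed pointer and cast the result back. The equivalent source-level round trip, again as an illustrative sketch rather than what codegen literally runs:

use std::sync::atomic::{AtomicUsize, Ordering};

// Load the pointer bits as an integer, then cast back to a pointer; this
// mirrors the pointercast/inttoptr pair emitted above.
fn load_ptr<T>(slot: &AtomicUsize, order: Ordering) -> *mut T {
    slot.load(order) as *mut T
}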
@@ -474,9 +492,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

             "store" => {
                 let ty = substs.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() {
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let size = bx.layout_of(ty).size;
-                    bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
+                    let mut val = args[1].immediate();
+                    let mut ptr = args[0].immediate();
+                    if ty.is_unsafe_ptr() {
+                        // Some platforms do not support atomic operations on pointers,
+                        // so we cast to integer first.
+                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                        ptr = bx.pointercast(ptr, ptr_llty);
+                        val = bx.ptrtoint(val, bx.type_isize());
+                    }
+                    bx.atomic_store(val, ptr, order, size);
                     return;
                 } else {
                     return invalid_monomorphization(ty);
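Taken together, the three arms let the stable `AtomicPtr` API reach codegen without integer casts in library code. A small usage example exercising all three operations (plain standard-library usage, nothing PR-specific):

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut x = 7i32;
    let slot = AtomicPtr::new(std::ptr::null_mut());
    slot.store(&mut x, Ordering::SeqCst); // the "store" arm
    let p = slot.load(Ordering::SeqCst); // the "load" arm
    assert_eq!(p, &mut x as *mut i32);
    let _ = slot.compare_exchange( // the "cxchg" arm
        p,
        std::ptr::null_mut(),
        Ordering::SeqCst,
        Ordering::SeqCst,
    );
}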
@@ -966,8 +966,16 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn load(&self, order: Ordering) -> *mut T {
+        #[cfg(not(bootstrap))]
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
+        unsafe {
+            atomic_load(self.p.get(), order)
+        }
+        #[cfg(bootstrap)]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_load(self.p.get() as *mut usize, order) as *mut T
+        }
     }

     /// Stores a value into the pointer.
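The `#[cfg(bootstrap)]` duplication is there because the stage-0 std is built by the previous beta compiler, whose intrinsics still only accept integers; once the compiler containing the codegen changes above is doing the building, the direct-pointer path is used. The mechanism in isolation (a sketch with a hypothetical `bootstrap` cfg flag; in rustc's build the flag is set by the build system):

// Exactly one of the two blocks survives cfg-expansion, so the function
// always ends with a single tail expression.
fn answer() -> u32 {
    #[cfg(not(bootstrap))]
    {
        42 // path taken in normal builds
    }
    #[cfg(bootstrap)]
    {
        41 + 1 // fallback path for the previous-stage compiler
    }
}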
@@ -994,6 +1002,12 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn store(&self, ptr: *mut T, order: Ordering) {
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_store(self.p.get(), ptr, order);
+        }
+        #[cfg(bootstrap)]
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe {
            atomic_store(self.p.get() as *mut usize, ptr as usize, order);
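Dropping the casts type-checks because the private `atomic_store` helper in this file is generic over the stored type. A hedged sketch of its shape (simplified stand-in; the real helper is atomic and dispatches on `Ordering` to per-ordering intrinsics, while this one uses a plain write purely to make the signature concrete):

use std::sync::atomic::Ordering;

// Because the helper is generic over `T: Copy`, instantiating `T = *mut U`
// means `self.p.get()` (a `*mut *mut U`) and `ptr` are accepted as-is, with
// no `as usize` round-trip at the call site.
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, _order: Ordering) {
    unsafe { dst.write(val) }
}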
@@ -1105,6 +1119,7 @@ impl<T> AtomicPtr<T> {
         success: Ordering,
         failure: Ordering,
     ) -> Result<*mut T, *mut T> {
+        #[cfg(bootstrap)]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             let res = atomic_compare_exchange(
@@ -1119,6 +1134,11 @@ impl<T> AtomicPtr<T> {
                 Err(x) => Err(x as *mut T),
             }
         }
+        #[cfg(not(bootstrap))]
+        // SAFETY: data races are prevented by atomic intrinsics.
+        unsafe {
+            atomic_compare_exchange(self.p.get(), current, new, success, failure)
+        }
     }

     /// Stores a value into the pointer if the current value is the same as the `current` value.
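For reference, the classic use of `compare_exchange` on an `AtomicPtr` is one-shot initialization, where losing the race means freeing your candidate and using the winner's value (standard-library usage sketch, not specific to this PR):

use std::sync::atomic::{AtomicPtr, Ordering};

fn get_or_init(slot: &AtomicPtr<u32>) -> *mut u32 {
    let candidate = Box::into_raw(Box::new(123));
    match slot.compare_exchange(
        std::ptr::null_mut(),
        candidate,
        Ordering::AcqRel,
        Ordering::Acquire,
    ) {
        Ok(_) => candidate,
        Err(winner) => {
            // Another thread won the race; reclaim our unused allocation.
            unsafe { drop(Box::from_raw(candidate)) };
            winner
        }
    }
}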
@@ -1165,6 +1185,7 @@ impl<T> AtomicPtr<T> {
         success: Ordering,
         failure: Ordering,
     ) -> Result<*mut T, *mut T> {
+        #[cfg(bootstrap)]
         // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             let res = atomic_compare_exchange_weak(
@@ -1179,6 +1200,14 @@ impl<T> AtomicPtr<T> {
                 Err(x) => Err(x as *mut T),
             }
         }
+        #[cfg(not(bootstrap))]
+        // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
+        // but we know for sure that the pointer is valid (we just got it from
+        // an `UnsafeCell` that we have by reference) and the atomic operation
+        // itself allows us to safely mutate the `UnsafeCell` contents.
+        unsafe {
+            atomic_compare_exchange_weak(self.p.get(), current, new, success, failure)
+        }
     }

     /// Fetches the value, and applies a function to it that returns an optional
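`compare_exchange_weak` may fail spuriously, so it is normally wrapped in a retry loop like the one below (again, a plain `AtomicPtr` API usage sketch):

use std::sync::atomic::{AtomicPtr, Ordering};

// Atomically swap in `new`, retrying on spurious failure, and return the
// pointer that was previously stored.
fn replace<T>(slot: &AtomicPtr<T>, new: *mut T) -> *mut T {
    let mut cur = slot.load(Ordering::Relaxed);
    loop {
        match slot.compare_exchange_weak(cur, new, Ordering::AcqRel, Ordering::Relaxed) {
            Ok(old) => return old,
            Err(observed) => cur = observed,
        }
    }
}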
@@ -110,16 +110,6 @@ struct Node {
     next: *mut Node,
 }

-#[cfg(miri)]
-extern "Rust" {
-    /// Miri-provided extern function to mark the block `ptr` points to as a "root"
-    /// for some static memory. This memory and everything reachable by it is not
-    /// considered leaking even if it still exists when the program terminates.
-    ///
-    /// `ptr` has to point to the beginning of an allocated block.
-    fn miri_static_root(ptr: *const u8);
-}
-
 unsafe fn register_dtor(key: Key, dtor: Dtor) {
     let mut node = Box::new(Node { key, dtor, next: ptr::null_mut() });

@@ -128,9 +118,6 @@ unsafe fn register_dtor(key: Key, dtor: Dtor) {
         node.next = head;
         match DTORS.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
             Ok(_) => {
-                #[cfg(miri)]
-                miri_static_root(&*node as *const _ as *const u8);
-
                 mem::forget(node);
                 return;
            }
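With provenance now preserved through `AtomicPtr::store` and `compare_exchange`, Miri's leak checker can follow the chain from the static `DTORS` head through each node's `next` field, so the `miri_static_root` escape hatch can be deleted. The surrounding loop, reconstructed as a self-contained sketch (`Key` and `Dtor` here are hypothetical stand-ins for the file's real definitions):

use std::mem;
use std::ptr;
use std::sync::atomic::{AtomicPtr, Ordering::SeqCst};

type Key = usize;
type Dtor = unsafe extern "C" fn(*mut u8);

#[allow(dead_code)]
struct Node {
    key: Key,
    dtor: Dtor,
    next: *mut Node,
}

static DTORS: AtomicPtr<Node> = AtomicPtr::new(ptr::null_mut());

unsafe fn register_dtor(key: Key, dtor: Dtor) {
    let mut node = Box::new(Node { key, dtor, next: ptr::null_mut() });
    let mut head = DTORS.load(SeqCst);
    loop {
        node.next = head;
        match DTORS.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
            // Push succeeded: leak the Box on purpose; the node is now owned
            // by the global list and, with provenance preserved, remains
            // reachable from `DTORS` under Miri's leak checker.
            Ok(_) => {
                mem::forget(node);
                return;
            }
            Err(cur) => head = cur,
        }
    }
}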