Ross Smyth 2025-04-13 13:25:13 +02:00 committed by GitHub
commit 159a253146
11 changed files with 312 additions and 166 deletions

View File

@@ -613,6 +613,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
size: Size,
is_volatile: bool,
) -> &'ll Value {
unsafe {
let load = llvm::LLVMRustBuildAtomicLoad(
@@ -621,6 +622,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr,
UNNAMED,
AtomicOrdering::from_generic(order),
is_volatile,
);
// LLVM requires the alignment of atomic loads to be at least the size of the type.
llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
@@ -851,6 +853,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value,
order: rustc_codegen_ssa::common::AtomicOrdering,
size: Size,
is_volatile: bool,
) {
debug!("Store {:?} -> {:?}", val, ptr);
assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
@@ -860,6 +863,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
val,
ptr,
AtomicOrdering::from_generic(order),
is_volatile,
);
// LLVM requires the alignment of atomic stores to be at least the size of the type.
llvm::LLVMSetAlignment(store, size.bytes() as c_uint);

View File

@@ -285,7 +285,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
_ => bug!("the va_arg intrinsic does not work with non-scalar types"),
}
}
sym::volatile_load | sym::unaligned_volatile_load => {
let tp_ty = fn_args.type_at(0);
let ptr = args[0].immediate();

View File

@@ -1970,6 +1970,7 @@ unsafe extern "C" {
PointerVal: &'a Value,
Name: *const c_char,
Order: AtomicOrdering,
IsVolatile: bool,
) -> &'a Value;
pub(crate) fn LLVMRustBuildAtomicStore<'a>(
@@ -1977,6 +1978,7 @@ unsafe extern "C" {
Val: &'a Value,
Ptr: &'a Value,
Order: AtomicOrdering,
IsVolatile: bool,
) -> &'a Value;
pub(crate) fn LLVMRustTimeTraceProfilerInitialize();

View File

@@ -396,6 +396,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
source,
parse_ordering(bx, ordering),
size,
false,
)
} else {
invalid_monomorphization(ty);
@@ -409,7 +410,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let size = bx.layout_of(ty).size;
let val = args[1].immediate();
let ptr = args[0].immediate();
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size, false);
} else {
invalid_monomorphization(ty);
}
@@ -491,7 +492,48 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
}
sym::volatile_load_atomic_relaxed => {
use crate::common::AtomicOrdering;
let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
let layout = bx.layout_of(ty);
let size = layout.size;
let source = args[0].immediate();
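// Emit a relaxed atomic load that is additionally marked volatile. The
// loaded value is this arm's result and is written to the intrinsic's
// destination after the match.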
bx.atomic_load(
bx.backend_type(layout),
source,
AtomicOrdering::Relaxed,
size,
true,
)
} else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
});
return Ok(());
}
}
sym::volatile_store_atomic_relaxed => {
use crate::common::AtomicOrdering;
let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
let size = bx.layout_of(ty).size;
let val = args[1].immediate();
let ptr = args[0].immediate();
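// Emit a relaxed atomic store that is additionally marked volatile.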
bx.atomic_store(val, ptr, AtomicOrdering::Relaxed, size, true);
} else {
bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
span,
name,
ty,
});
}
return Ok(());
}
sym::nontemporal_store => {
let dst = args[0].deref(bx.cx());
args[1].val.nontemporal_store(bx, dst);

View File

@@ -236,6 +236,7 @@ pub trait BuilderMethods<'a, 'tcx>:
ptr: Self::Value,
order: AtomicOrdering,
size: Size,
is_volatile: bool,
) -> Self::Value;
fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
assert_eq!(place.llextra, None);
@@ -316,6 +317,7 @@ pub trait BuilderMethods<'a, 'tcx>:
ptr: Self::Value,
order: AtomicOrdering,
size: Size,
is_volatile: bool,
);
fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;

View File

@@ -415,10 +415,14 @@ pub(crate) fn check_intrinsic_type(
sym::roundf64 => (0, 0, vec![tcx.types.f64], tcx.types.f64),
sym::roundf128 => (0, 0, vec![tcx.types.f128], tcx.types.f128),
sym::volatile_load | sym::unaligned_volatile_load => {
sym::volatile_load
| sym::unaligned_volatile_load
| sym::volatile_load_atomic_relaxed => {
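// (number of type params, number of const params, argument types, return type)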
(1, 0, vec![Ty::new_imm_ptr(tcx, param(0))], param(0))
}
sym::volatile_store | sym::unaligned_volatile_store => {
sym::volatile_store
| sym::unaligned_volatile_store
| sym::volatile_store_atomic_relaxed => {
(1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
}

View File

@@ -606,19 +606,28 @@ extern "C" void LLVMRustSetAllowReassoc(LLVMValueRef V) {
extern "C" LLVMValueRef
LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
const char *Name, LLVMAtomicOrdering Order) {
const char *Name, LLVMAtomicOrdering Order, bool isVolatile) {
Value *Ptr = unwrap(Source);
LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
LI->setAtomic(fromRust(Order));
// Additionally mark the atomic load as volatile; LLVM permits volatile
// atomic loads.
if (isVolatile)
LI->setVolatile(true);
return wrap(LI);
}
extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
LLVMValueRef V,
LLVMValueRef Target,
LLVMAtomicOrdering Order) {
LLVMAtomicOrdering Order,
bool isVolatile) {
StoreInst *SI = unwrap(B)->CreateStore(unwrap(V), unwrap(Target));
SI->setAtomic(fromRust(Order));
// Additionally mark the atomic store as volatile; LLVM permits volatile
// atomic stores.
if (isVolatile)
SI->setVolatile(true);
return wrap(SI);
}

View File

@@ -2277,8 +2277,10 @@ symbols! {
volatile_copy_memory,
volatile_copy_nonoverlapping_memory,
volatile_load,
volatile_load_atomic_relaxed,
volatile_set_memory,
volatile_store,
volatile_store_atomic_relaxed,
vreg,
vreg_low16,
vsx,

View File

@@ -1838,6 +1838,26 @@ pub unsafe fn unaligned_volatile_load<T>(src: *const T) -> T;
#[rustc_diagnostic_item = "intrinsics_unaligned_volatile_store"]
pub unsafe fn unaligned_volatile_store<T>(dst: *mut T, val: T);
/// Performs a volatile load from the `src` pointer.
/// The pointer must be aligned, and `T` must be a type supported for
/// lock-free atomic operations on the target.
///
/// The load is also an atomic operation with relaxed ordering.
#[rustc_intrinsic]
#[rustc_nounwind]
#[cfg(not(bootstrap))]
pub unsafe fn volatile_load_atomic_relaxed<T>(src: *const T) -> T;
/// Performs a volatile store to the `dst` pointer.
/// The pointer must be aligned, and `T` must be a type supported for
/// lock-free atomic operations on the target.
///
/// The store is also an atomic operation with relaxed ordering.
#[rustc_intrinsic]
#[rustc_nounwind]
#[cfg(not(bootstrap))]
pub unsafe fn volatile_store_atomic_relaxed<T>(dst: *mut T, val: T);
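
A minimal sketch of how these intrinsics could be exercised directly on nightly; it assumes the `core_intrinsics` feature and a target with lock-free 32-bit atomics (stable code would instead reach them through `ptr::read_volatile` and `ptr::write_volatile`):

#![feature(core_intrinsics)]
use std::intrinsics::{volatile_load_atomic_relaxed, volatile_store_atomic_relaxed};

fn main() {
    let mut reg: u32 = 0;
    let p = &mut reg as *mut u32;
    // SAFETY: `p` is non-null, aligned for `u32`, and `u32` atomic
    // operations are lock-free on this target, as the intrinsics require.
    unsafe {
        volatile_store_atomic_relaxed(p, 0xDEAD_BEEF);
        assert_eq!(volatile_load_atomic_relaxed(p), 0xDEAD_BEEF);
    }
}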
/// Returns the square root of an `f16`
///
/// The stabilized version of this intrinsic is

View File

@@ -426,9 +426,12 @@ pub use non_null::NonNull;
mod unique;
#[unstable(feature = "ptr_internals", issue = "none")]
pub use unique::Unique;
#[stable(feature = "volatile", since = "1.9.0")]
pub use volatile::{read_volatile, write_volatile};
mod const_ptr;
mod mut_ptr;
mod volatile;
/// Executes the destructor (if any) of the pointed-to value.
///
@@ -1709,166 +1712,6 @@ pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
}
}
/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be properly aligned.
///
/// [valid]: self#safety
/// [read-ownership]: read#ownership-of-the-returned-value
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
/// assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[rustc_diagnostic_item = "ptr_read_volatile"]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read_volatile requires that the pointer argument is aligned and non-null",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
intrinsics::volatile_load(src)
}
}
/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be properly aligned.
///
/// [valid]: self#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
/// std::ptr::write_volatile(y, z);
/// assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
#[rustc_diagnostic_item = "ptr_write_volatile"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_volatile requires that the pointer argument is aligned and non-null",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
intrinsics::volatile_store(dst, src);
}
}
/// Align pointer `p`.
///
/// Calculate offset (in terms of elements of `size_of::<T>()` stride) that has to be applied

View File

@@ -0,0 +1,219 @@
use crate::mem::SizedTypeProperties;
#[cfg(not(bootstrap))]
use crate::sync::atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64};
use crate::{cfg, intrinsics};
/// Performs a volatile read of the value from `src` without moving it. This
/// leaves the memory in `src` unchanged.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `src` must be [valid] for reads.
///
/// * `src` must be properly aligned.
///
/// * `src` must point to a properly initialized value of type `T`.
///
/// Like [`read`], `read_volatile` creates a bitwise copy of `T`, regardless of
/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned
/// value and the value at `*src` can [violate memory safety][read-ownership].
/// However, storing non-[`Copy`] types in volatile memory is almost certainly
/// incorrect.
///
/// Note that even if `T` has size `0`, the pointer must be properly aligned.
///
/// [valid]: crate::ptr#safety
/// [read-ownership]: crate::ptr::read#ownership-of-the-returned-value
/// [`read`]: crate::ptr::read
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `read_volatile` and any write operation to the same location
/// is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let x = 12;
/// let y = &x as *const i32;
///
/// unsafe {
/// assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
#[cfg_attr(miri, track_caller)] // Even without panics, this helps for Miri backtraces
#[rustc_diagnostic_item = "ptr_read_volatile"]
pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
crate::ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read_volatile requires that the pointer argument is aligned and non-null",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => crate::ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
// TODO: Guard patterns
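// Use the relaxed-atomic volatile intrinsic only when `T` has exactly the
// size and alignment of an atomic type that the target supports lock-free;
// all other types fall back to the plain volatile load below.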
#[cfg(not(bootstrap))]
match size_of::<T>() {
1 if cfg!(target_has_atomic_load_store = "8")
&& align_of::<T>() == align_of::<AtomicU8>() =>
{
intrinsics::volatile_load_atomic_relaxed(src)
}
2 if cfg!(target_has_atomic_load_store = "16")
&& align_of::<T>() == align_of::<AtomicU16>() =>
{
intrinsics::volatile_load_atomic_relaxed(src)
}
4 if cfg!(target_has_atomic_load_store = "32")
&& align_of::<T>() == align_of::<AtomicU32>() =>
{
intrinsics::volatile_load_atomic_relaxed(src)
}
8 if cfg!(target_has_atomic_load_store = "64")
&& align_of::<T>() == align_of::<AtomicU64>() =>
{
intrinsics::volatile_load_atomic_relaxed(src)
}
_ => intrinsics::volatile_load(src),
}
#[cfg(bootstrap)]
intrinsics::volatile_load(src)
}
}
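
For illustration, the selection performed by the match above can be mirrored in ordinary user code. The helper below is hypothetical and uses the stable `target_has_atomic` cfg as a stand-in for the internal `target_has_atomic_load_store` one:

use std::mem::{align_of, size_of};
use std::sync::atomic::{AtomicU8, AtomicU16, AtomicU32, AtomicU64};

// Hypothetical mirror of the dispatch: true when a volatile access to `T`
// would take the relaxed-atomic path on the current target.
fn takes_atomic_path<T>() -> bool {
    match size_of::<T>() {
        1 => cfg!(target_has_atomic = "8") && align_of::<T>() == align_of::<AtomicU8>(),
        2 => cfg!(target_has_atomic = "16") && align_of::<T>() == align_of::<AtomicU16>(),
        4 => cfg!(target_has_atomic = "32") && align_of::<T>() == align_of::<AtomicU32>(),
        8 => cfg!(target_has_atomic = "64") && align_of::<T>() == align_of::<AtomicU64>(),
        _ => false, // odd sizes keep the plain volatile lowering
    }
}

fn main() {
    assert!(takes_atomic_path::<u32>()); // on targets with 32-bit atomics
    assert!(!takes_atomic_path::<[u8; 3]>()); // no 3-byte atomics exist
}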
/// Performs a volatile write of a memory location with the given value without
/// reading or dropping the old value.
///
/// Volatile operations are intended to act on I/O memory, and are guaranteed
/// to not be elided or reordered by the compiler across other volatile
/// operations.
///
/// `write_volatile` does not drop the contents of `dst`. This is safe, but it
/// could leak allocations or resources, so care should be taken not to overwrite
/// an object that should be dropped.
///
/// Additionally, it does not drop `src`. Semantically, `src` is moved into the
/// location pointed to by `dst`.
///
/// # Notes
///
/// Rust does not currently have a rigorously and formally defined memory model,
/// so the precise semantics of what "volatile" means here is subject to change
/// over time. That being said, the semantics will almost always end up pretty
/// similar to [C11's definition of volatile][c11].
///
/// The compiler shouldn't change the relative order or number of volatile
/// memory operations. However, volatile memory operations on zero-sized types
/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops
/// and may be ignored.
///
/// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf
///
/// # Safety
///
/// Behavior is undefined if any of the following conditions are violated:
///
/// * `dst` must be [valid] for writes.
///
/// * `dst` must be properly aligned.
///
/// Note that even if `T` has size `0`, the pointer must be properly aligned.
///
/// [valid]: crate::ptr#safety
///
/// Just like in C, whether an operation is volatile has no bearing whatsoever
/// on questions involving concurrent access from multiple threads. Volatile
/// accesses behave exactly like non-atomic accesses in that regard. In particular,
/// a race between a `write_volatile` and any other operation (reading or writing)
/// on the same location is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let mut x = 0;
/// let y = &mut x as *mut i32;
/// let z = 12;
///
/// unsafe {
/// std::ptr::write_volatile(y, z);
/// assert_eq!(std::ptr::read_volatile(y), 12);
/// }
/// ```
#[inline]
#[stable(feature = "volatile", since = "1.9.0")]
#[cfg_attr(miri, track_caller)] // Even without panics, this helps for Miri backtraces
#[rustc_diagnostic_item = "ptr_write_volatile"]
pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
crate::ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_volatile requires that the pointer argument is aligned and non-null",
(
addr: *mut () = dst as *mut (),
align: usize = crate::mem::align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => crate::ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
// TODO: Guard patterns
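// Same dispatch as in `read_volatile` above: use the relaxed-atomic volatile
// intrinsic only for sizes and alignments the target supports lock-free.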
#[cfg(not(bootstrap))]
match size_of::<T>() {
1 if cfg!(target_has_atomic_load_store = "8")
&& align_of::<T>() == align_of::<AtomicU8>() =>
{
intrinsics::volatile_store_atomic_relaxed(dst, src)
}
2 if cfg!(target_has_atomic_load_store = "16")
&& align_of::<T>() == align_of::<AtomicU16>() =>
{
intrinsics::volatile_store_atomic_relaxed(dst, src)
}
4 if cfg!(target_has_atomic_load_store = "32")
&& align_of::<T>() == align_of::<AtomicU32>() =>
{
intrinsics::volatile_store_atomic_relaxed(dst, src)
}
8 if cfg!(target_has_atomic_load_store = "64")
&& align_of::<T>() == align_of::<AtomicU64>() =>
{
intrinsics::volatile_store_atomic_relaxed(dst, src)
}
_ => intrinsics::volatile_store(dst, src),
}
#[cfg(bootstrap)]
intrinsics::volatile_store(dst, src)
}
}
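
Putting the pieces together, a minimal usage sketch of the stable entry points; the "status register" here is simulated with a local so the example is runnable, whereas real MMIO would use a platform-provided address:

use std::ptr;

fn main() {
    let mut fake_status: u32 = 0x1; // stand-in for a memory-mapped status register
    let status = &mut fake_status as *mut u32;

    // SAFETY: `status` is non-null, aligned, and points to initialized memory.
    unsafe {
        // Typical MMIO pattern: poll until the ready bit is set. With this
        // change, each of these 4-byte accesses also lowers to a relaxed
        // atomic volatile operation on targets with lock-free 32-bit atomics.
        while ptr::read_volatile(status) & 0x1 == 0 {
            std::hint::spin_loop();
        }
        // Acknowledge by writing the bit back.
        ptr::write_volatile(status, 0x1);
    }
}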