add volatile copy/copy_nonoverlapping/set

This exposes volatile versions of the memset/memmove/memcpy intrinsics.

The volatile flag on the underlying LLVM intrinsics must be a compile-time
constant, so it can't simply be exposed as a runtime parameter on our
intrinsics; separate volatile_* intrinsics are added instead.
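
As a rough illustration (not part of the diff below; the helper name is
hypothetical and the `std::intrinsics` path of this era is assumed), a caller
could reach for the new memset variant when a write must survive optimization:

    use std::intrinsics::volatile_set_memory;

    // Zero a buffer that is never read again. Because the underlying
    // llvm.memset is emitted with its volatile flag set to true, the
    // optimizer will not drop the store as dead.
    unsafe fn zero_secrets(buf: *mut u8, len: uint) {
        volatile_set_memory(buf, 0u8, len);
    }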
Daniel Micay 2014-04-22 19:51:14 -04:00
parent 09bfb92fdc
commit b2724727d5
3 changed files with 43 additions and 32 deletions


@@ -129,7 +129,7 @@ pub fn trans_intrinsic(ccx: &CrateContext,
         RetVoid(bcx);
     }
 
-    fn copy_intrinsic(bcx: &Block, allow_overlap: bool, tp_ty: ty::t) {
+    fn copy_intrinsic(bcx: &Block, allow_overlap: bool, volatile: bool, tp_ty: ty::t) {
         let ccx = bcx.ccx();
         let lltp_ty = type_of::type_of(ccx, tp_ty);
         let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
@@ -154,13 +154,12 @@ pub fn trans_intrinsic(ccx: &CrateContext,
         let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
         let src_ptr = PointerCast(bcx, get_param(decl, first_real_arg + 1), Type::i8p(ccx));
         let count = get_param(decl, first_real_arg + 2);
-        let volatile = C_i1(ccx, false);
         let llfn = ccx.get_intrinsic(&name);
-        Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, volatile], []);
+        Call(bcx, llfn, [dst_ptr, src_ptr, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
         RetVoid(bcx);
     }
 
-    fn memset_intrinsic(bcx: &Block, tp_ty: ty::t) {
+    fn memset_intrinsic(bcx: &Block, volatile: bool, tp_ty: ty::t) {
         let ccx = bcx.ccx();
         let lltp_ty = type_of::type_of(ccx, tp_ty);
         let align = C_i32(ccx, machine::llalign_of_min(ccx, lltp_ty) as i32);
@@ -176,9 +175,8 @@ pub fn trans_intrinsic(ccx: &CrateContext,
         let dst_ptr = PointerCast(bcx, get_param(decl, first_real_arg), Type::i8p(ccx));
         let val = get_param(decl, first_real_arg + 1);
         let count = get_param(decl, first_real_arg + 2);
-        let volatile = C_i1(ccx, false);
         let llfn = ccx.get_intrinsic(&name);
-        Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, volatile], []);
+        Call(bcx, llfn, [dst_ptr, val, Mul(bcx, size, count), align, C_i1(ccx, volatile)], []);
         RetVoid(bcx);
     }
 
@@ -466,11 +464,15 @@ pub fn trans_intrinsic(ccx: &CrateContext,
             let lladdr = InBoundsGEP(bcx, ptr, [offset]);
             Ret(bcx, lladdr);
         }
-        "copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, *substs.tys.get(0))
-        }
-        "copy_memory" => copy_intrinsic(bcx, true, *substs.tys.get(0)),
-        "set_memory" => memset_intrinsic(bcx, *substs.tys.get(0)),
+        "copy_nonoverlapping_memory" => copy_intrinsic(bcx, false, false, *substs.tys.get(0)),
+        "copy_memory" => copy_intrinsic(bcx, true, false, *substs.tys.get(0)),
+        "set_memory" => memset_intrinsic(bcx, false, *substs.tys.get(0)),
+
+        "volatile_copy_nonoverlapping_memory" =>
+            copy_intrinsic(bcx, false, true, *substs.tys.get(0)),
+        "volatile_copy_memory" => copy_intrinsic(bcx, true, true, *substs.tys.get(0)),
+        "volatile_set_memory" => memset_intrinsic(bcx, true, *substs.tys.get(0)),
+
         "ctlz8" => count_zeros_intrinsic(bcx, "llvm.ctlz.i8"),
         "ctlz16" => count_zeros_intrinsic(bcx, "llvm.ctlz.i16"),
         "ctlz32" => count_zeros_intrinsic(bcx, "llvm.ctlz.i32"),


@@ -4127,7 +4127,8 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
                 mutbl: ast::MutImmutable
             }))
         }
-        "copy_nonoverlapping_memory" => {
+        "copy_memory" | "copy_nonoverlapping_memory" |
+        "volatile_copy_memory" | "volatile_copy_nonoverlapping_memory" => {
             (1,
              vec!(
                 ty::mk_ptr(tcx, ty::mt {
@@ -4142,22 +4143,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &ast::ForeignItem) {
              ),
              ty::mk_nil())
         }
-        "copy_memory" => {
-            (1,
-             vec!(
-                ty::mk_ptr(tcx, ty::mt {
-                    ty: param(ccx, 0),
-                    mutbl: ast::MutMutable
-                }),
-                ty::mk_ptr(tcx, ty::mt {
-                    ty: param(ccx, 0),
-                    mutbl: ast::MutImmutable
-                }),
-                ty::mk_uint()
-             ),
-             ty::mk_nil())
-        }
-        "set_memory" => {
+        "set_memory" | "volatile_set_memory" => {
             (1,
              vec!(
                 ty::mk_ptr(tcx, ty::mt {


@@ -261,10 +261,6 @@ extern "rust-intrinsic" {
     /// Execute a breakpoint trap, for inspection by a debugger.
     pub fn breakpoint();
 
-    pub fn volatile_load<T>(src: *T) -> T;
-    pub fn volatile_store<T>(dst: *mut T, val: T);
-
     /// The size of a type in bytes.
     ///
     /// This is the exact number of bytes in memory taken up by a
@@ -338,6 +334,33 @@ extern "rust-intrinsic" {
     /// `min_align_of::<T>()`
     pub fn set_memory<T>(dst: *mut T, val: u8, count: uint);
 
+    /// Equivalent to the appropriate `llvm.memcpy.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`.
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_copy_nonoverlapping_memory<T>(dst: *mut T, src: *T, count: uint);
+    /// Equivalent to the appropriate `llvm.memmove.p0i8.p0i8.*` intrinsic, with
+    /// a size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`.
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_copy_memory<T>(dst: *mut T, src: *T, count: uint);
+    /// Equivalent to the appropriate `llvm.memset.p0i8.*` intrinsic, with a
+    /// size of `count` * `size_of::<T>()` and an alignment of
+    /// `min_align_of::<T>()`.
+    ///
+    /// The volatile parameter is set to `true`, so it will not be optimized out.
+    #[cfg(not(stage0))]
+    pub fn volatile_set_memory<T>(dst: *mut T, val: u8, count: uint);
+
+    /// Perform a volatile load from the `src` pointer.
+    pub fn volatile_load<T>(src: *T) -> T;
+    /// Perform a volatile store to the `dst` pointer.
+    pub fn volatile_store<T>(dst: *mut T, val: T);
+
     pub fn sqrtf32(x: f32) -> f32;
     pub fn sqrtf64(x: f64) -> f64;
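
For the memmove variant, a similarly hedged sketch (hypothetical caller, same
era syntax) shows the intended use for writes the compiler cannot otherwise
prove live, such as copying into a memory-mapped buffer:

    use std::intrinsics::volatile_copy_memory;

    // Copy a block into a destination whose reads happen outside the
    // program (e.g. a memory-mapped device). The volatile llvm.memmove
    // keeps the copy from being optimized away.
    unsafe fn write_block(dst: *mut u8, src: *u8, len: uint) {
        volatile_copy_memory(dst, src, len);
    }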