Mirror of https://github.com/rust-lang/rust.git, synced 2024-12-25 23:14:12 +00:00
Add the rest of the atomic operations.
This makes the handling of atomic operations more generic. It does impose a specific naming convention on the intrinsics, but that seems better than having an individual case for each name. It also adds the new intrinsics to the intrinsics file.
parent fd83b92b59
commit befbd3a680
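For illustration, here is a minimal, self-contained sketch (in present-day Rust, outside the compiler) of the naming scheme this commit adopts: "atomic_<operation>[_<ordering>]", where a missing ordering suffix means sequentially consistent. The Ordering enum and function name below are invented for the example and merely mirror lib::llvm's atomic orderings.

#[derive(Debug, PartialEq)]
enum Ordering {
    Relaxed,
    Acquire,
    Release,
    AcquireRelease,
    SequentiallyConsistent,
}

fn parse_atomic_intrinsic(name: &str) -> Option<(&str, Ordering)> {
    // Strip the common prefix; anything else is not an atomic intrinsic.
    let rest = name.strip_prefix("atomic_")?;
    // Split into "<operation>" and an optional "<ordering>" suffix.
    let mut parts = rest.splitn(2, '_');
    let op = parts.next()?;
    let order = match parts.next() {
        None => Ordering::SequentiallyConsistent, // no suffix => SeqCst
        Some("relaxed") => Ordering::Relaxed,     // maps to LLVM's Monotonic
        Some("acq") => Ordering::Acquire,
        Some("rel") => Ordering::Release,
        Some("acqrel") => Ordering::AcquireRelease,
        Some(_) => return None,                   // unknown ordering suffix
    };
    Some((op, order))
}

fn main() {
    assert_eq!(parse_atomic_intrinsic("atomic_xadd"),
               Some(("xadd", Ordering::SequentiallyConsistent)));
    assert_eq!(parse_atomic_intrinsic("atomic_cxchg_acq"),
               Some(("cxchg", Ordering::Acquire)));
    assert_eq!(parse_atomic_intrinsic("atomic_store_relaxed"),
               Some(("store", Ordering::Relaxed)));
    assert_eq!(parse_atomic_intrinsic("size_of"), None);
}

The compiler code in the diff below does the same thing with split_iter('_') and an explicit match on the suffix; the sketch only shows the shape of the convention.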
@@ -11,7 +11,6 @@
use core::prelude::*;

use back::{link, abi};
use lib::llvm::{SequentiallyConsistent, Acquire, Release, Xchg};
use lib::llvm::{TypeRef, ValueRef};
use lib;
use middle::trans::base::*;
@@ -578,118 +577,73 @@ pub fn trans_intrinsic(ccx: @mut CrateContext,
let mut bcx = top_scope_block(fcx, None);
let lltop = bcx.llbb;
let first_real_arg = fcx.arg_pos(0u);
match ccx.sess.str_of(item.ident).as_slice() {
"atomic_cxchg" => {
let old = AtomicCmpXchg(bcx,
get_param(decl, first_real_arg),

let nm = ccx.sess.str_of(item.ident);
let name = nm.as_slice();

// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
if name.starts_with("atomic_") {
let split : ~[&str] = name.split_iter('_').collect();
assert!(split.len() >= 2, "Atomic intrinsic not in correct format");
let order = if split.len() == 2 {
lib::llvm::SequentiallyConsistent
} else {
match split[2] {
"relaxed" => lib::llvm::Monotonic,
"acq" => lib::llvm::Acquire,
"rel" => lib::llvm::Release,
"acqrel" => lib::llvm::AcquireRelease,
_ => ccx.sess.fatal("Unknown ordering in atomic intrinsic")
}
};

match split[1] {
"cxchg" => {
let old = AtomicCmpXchg(bcx, get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg + 2u),
SequentiallyConsistent);
order);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_cxchg_acq" => {
let old = AtomicCmpXchg(bcx,
"load" => {
let old = AtomicLoad(bcx, get_param(decl, first_real_arg),
order);
Store(bcx, old, fcx.llretptr.get());
}
"store" => {
AtomicStore(bcx, get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg),
order);
}
op => {
// These are all AtomicRMW ops
let atom_op = match op {
"xchg" => lib::llvm::Xchg,
"xadd" => lib::llvm::Add,
"xsub" => lib::llvm::Sub,
"and" => lib::llvm::And,
"nand" => lib::llvm::Nand,
"or" => lib::llvm::Or,
"xor" => lib::llvm::Xor,
"max" => lib::llvm::Max,
"min" => lib::llvm::Min,
"umax" => lib::llvm::UMax,
"umin" => lib::llvm::UMin,
_ => ccx.sess.fatal("Unknown atomic operation")
};

let old = AtomicRMW(bcx, atom_op, get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg + 2u),
Acquire);
order);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_cxchg_rel" => {
let old = AtomicCmpXchg(bcx,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg + 2u),
Release);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_load" => {
let old = AtomicLoad(bcx,
get_param(decl, first_real_arg),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_load_acq" => {
let old = AtomicLoad(bcx,
get_param(decl, first_real_arg),
Acquire);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_store" => {
AtomicStore(bcx,
get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg),
SequentiallyConsistent);
}
"atomic_store_rel" => {
AtomicStore(bcx,
get_param(decl, first_real_arg + 1u),
get_param(decl, first_real_arg),
Release);
}
"atomic_xchg" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xchg_acq" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xchg_rel" => {
let old = AtomicRMW(bcx, Xchg,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xadd" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xadd_acq" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xadd_rel" => {
let old = AtomicRMW(bcx, lib::llvm::Add,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xsub" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
SequentiallyConsistent);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xsub_acq" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Acquire);
Store(bcx, old, fcx.llretptr.get());
}
"atomic_xsub_rel" => {
let old = AtomicRMW(bcx, lib::llvm::Sub,
get_param(decl, first_real_arg),
get_param(decl, first_real_arg + 1u),
Release);
Store(bcx, old, fcx.llretptr.get());

return;
}

match name {
"size_of" => {
let tp_ty = substs.tys[0];
let lltp_ty = type_of::type_of(ccx, tp_ty);
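To summarize the new trans path above: once the intrinsic name is parsed, "cxchg", "load", and "store" get dedicated handling, and every remaining operation name is mapped onto an LLVM atomicrmw operation. A small stand-alone sketch of that catch-all mapping follows; the AtomicRmwOp enum is an illustrative stand-in for lib::llvm's atomic binop values, not the compiler's actual type.

#[derive(Debug, PartialEq)]
enum AtomicRmwOp { Xchg, Add, Sub, And, Nand, Or, Xor, Max, Min, UMax, UMin }

use self::AtomicRmwOp::*;

fn rmw_op(op: &str) -> Option<AtomicRmwOp> {
    Some(match op {
        "xchg" => Xchg,
        "xadd" => Add,    // fetch-and-add
        "xsub" => Sub,    // fetch-and-subtract
        "and" => And,
        "nand" => Nand,
        "or" => Or,
        "xor" => Xor,
        "max" => Max,     // signed maximum
        "min" => Min,     // signed minimum
        "umax" => UMax,   // unsigned maximum
        "umin" => UMin,   // unsigned minimum
        _ => return None, // "cxchg", "load", "store" are handled separately
    })
}

fn main() {
    assert_eq!(rmw_op("xadd"), Some(AtomicRmwOp::Add));
    assert_eq!(rmw_op("cxchg"), None);
}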
@@ -117,22 +117,18 @@ pub fn type_uses_for(ccx: @mut CrateContext, fn_id: def_id, n_tps: uint)
_,
_) => {
if abi.is_intrinsic() {
let flags = match cx.ccx.sess.str_of(i.ident).as_slice() {
let nm = cx.ccx.sess.str_of(i.ident);
let name = nm.as_slice();
let flags = if name.starts_with("atomic_") {
0
} else {
match name {
"size_of" | "pref_align_of" | "min_align_of" |
"uninit" | "init" | "transmute" | "move_val" |
"move_val_init" => use_repr,

"get_tydesc" | "needs_drop" => use_tydesc,

"atomic_cxchg" | "atomic_cxchg_acq"|
"atomic_cxchg_rel"| "atomic_load" |
"atomic_load_acq" | "atomic_store" |
"atomic_store_rel"| "atomic_xchg" |
"atomic_xadd" | "atomic_xsub" |
"atomic_xchg_acq" | "atomic_xadd_acq" |
"atomic_xsub_acq" | "atomic_xchg_rel" |
"atomic_xadd_rel" | "atomic_xsub_rel" => 0,

"visit_tydesc" | "forget" | "frame_address" |
"morestack_addr" => 0,

@@ -157,6 +153,7 @@ pub fn type_uses_for(ccx: @mut CrateContext, fn_id: def_id, n_tps: uint)

// would be cool to make these an enum instead of strings!
_ => fail!("unknown intrinsic in type_use")
}
};
for uint::range(0u, n_tps) |n| { cx.uses[n] |= flags;}
}
@@ -3434,8 +3434,49 @@ pub fn check_intrinsic_type(ccx: @mut CrateCtxt, it: @ast::foreign_item) {
}

let tcx = ccx.tcx;
let str = ccx.tcx.sess.str_of(it.ident);
let (n_tps, inputs, output) = match str.as_slice() {
let nm = ccx.tcx.sess.str_of(it.ident);
let name = nm.as_slice();
let (n_tps, inputs, output) = if name.starts_with("atomic_") {
let split : ~[&str] = name.split_iter('_').collect();
assert!(split.len() >= 2, "Atomic intrinsic not in correct format");

// We only care about the operation here.
match split[1] {
"cxchg" => (0, ~[ty::mk_mut_rptr(tcx,
ty::re_bound(ty::br_anon(0)),
ty::mk_int()),
ty::mk_int(),
ty::mk_int()
], ty::mk_int()),
"load" => (0,
~[
ty::mk_imm_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int())
],
ty::mk_int()),
"store" => (0,
~[
ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int()),
ty::mk_int()
],
ty::mk_nil()),

"xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" |
"min" | "umax" | "umin" => {
(0, ~[ty::mk_mut_rptr(tcx,
ty::re_bound(ty::br_anon(0)),
ty::mk_int()), ty::mk_int() ], ty::mk_int())
}

op => {
tcx.sess.span_err(it.span,
fmt!("unrecognized atomic operation function: `%s`",
op));
return;
}
}

} else {
match name {
"size_of" |
"pref_align_of" | "min_align_of" => (1u, ~[], ty::mk_uint()),
"init" => (1u, ~[], param(ccx, 0u)),
@@ -3452,32 +3493,6 @@ pub fn check_intrinsic_type(ccx: @mut CrateCtxt, it: @ast::foreign_item) {
}
"needs_drop" => (1u, ~[], ty::mk_bool()),

"atomic_cxchg" | "atomic_cxchg_acq"| "atomic_cxchg_rel" => {
(0,
~[
ty::mk_mut_rptr(tcx,
ty::re_bound(ty::br_anon(0)),
ty::mk_int()),
ty::mk_int(),
ty::mk_int()
],
ty::mk_int())
}
"atomic_load" | "atomic_load_acq" => {
(0,
~[
ty::mk_imm_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int())
],
ty::mk_int())
}
"atomic_store" | "atomic_store_rel" => {
(0,
~[
ty::mk_mut_rptr(tcx, ty::re_bound(ty::br_anon(0)), ty::mk_int()),
ty::mk_int()
],
ty::mk_nil())
}
"atomic_xchg" | "atomic_xadd" | "atomic_xsub" |
"atomic_xchg_acq" | "atomic_xadd_acq" | "atomic_xsub_acq" |
"atomic_xchg_rel" | "atomic_xadd_rel" | "atomic_xsub_rel" => {
@@ -3681,6 +3696,7 @@ pub fn check_intrinsic_type(ccx: @mut CrateCtxt, it: @ast::foreign_item) {
*other));
return;
}
}
};
let fty = ty::mk_bare_fn(tcx, ty::BareFnTy {
purity: ast::unsafe_fn,
@@ -42,22 +42,38 @@ pub extern "rust-intrinsic" {
/// Atomic compare and exchange, release ordering.
pub fn atomic_cxchg_rel(dst: &mut int, old: int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_cxchg_acqrel(dst: &mut int, old: int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_cxchg_relaxed(dst: &mut int, old: int, src: int) -> int;


/// Atomic load, sequentially consistent.
pub fn atomic_load(src: &int) -> int;
/// Atomic load, acquire ordering.
pub fn atomic_load_acq(src: &int) -> int;

#[cfg(not(stage0))]
pub fn atomic_load_relaxed(src: &int) -> int;

/// Atomic store, sequentially consistent.
pub fn atomic_store(dst: &mut int, val: int);
/// Atomic store, release ordering.
pub fn atomic_store_rel(dst: &mut int, val: int);

#[cfg(not(stage0))]
pub fn atomic_store_relaxed(dst: &mut int, val: int);

/// Atomic exchange, sequentially consistent.
pub fn atomic_xchg(dst: &mut int, src: int) -> int;
/// Atomic exchange, acquire ordering.
pub fn atomic_xchg_acq(dst: &mut int, src: int) -> int;
/// Atomic exchange, release ordering.
pub fn atomic_xchg_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xchg_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xchg_relaxed(dst: &mut int, src: int) -> int;

/// Atomic addition, sequentially consistent.
pub fn atomic_xadd(dst: &mut int, src: int) -> int;
@@ -65,6 +81,10 @@ pub extern "rust-intrinsic" {
pub fn atomic_xadd_acq(dst: &mut int, src: int) -> int;
/// Atomic addition, release ordering.
pub fn atomic_xadd_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xadd_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xadd_relaxed(dst: &mut int, src: int) -> int;

/// Atomic subtraction, sequentially consistent.
pub fn atomic_xsub(dst: &mut int, src: int) -> int;
@@ -72,6 +92,98 @@ pub extern "rust-intrinsic" {
pub fn atomic_xsub_acq(dst: &mut int, src: int) -> int;
/// Atomic subtraction, release ordering.
pub fn atomic_xsub_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xsub_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xsub_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_and(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_and_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_and_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_and_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_and_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_nand(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_nand_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_nand_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_nand_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_nand_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_or(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_or_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_or_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_or_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_or_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_xor(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xor_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xor_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xor_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_xor_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_max(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_max_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_max_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_max_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_max_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_min(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_min_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_min_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_min_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_min_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_umax(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umax_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umax_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umax_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umax_relaxed(dst: &mut int, src: int) -> int;

#[cfg(not(stage0))]
pub fn atomic_umin(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umin_acq(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umin_rel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umin_acqrel(dst: &mut int, src: int) -> int;
#[cfg(not(stage0))]
pub fn atomic_umin_relaxed(dst: &mut int, src: int) -> int;

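For orientation, the sketch below shows roughly how these int-typed intrinsics line up with today's stable std::sync::atomic API. It is an analogue for readers, not how the intrinsics themselves are invoked; the unsigned umax/umin operations correspond to the same fetch_max/fetch_min calls on AtomicUsize.

use std::sync::atomic::{AtomicIsize, Ordering};

fn main() {
    let x = AtomicIsize::new(0);

    // atomic_store / atomic_store_rel
    x.store(1, Ordering::SeqCst);
    x.store(2, Ordering::Release);

    // atomic_load / atomic_load_acq
    let _ = x.load(Ordering::SeqCst);
    let _ = x.load(Ordering::Acquire);

    // atomic_xchg: swap returns the previous value
    let prev = x.swap(10, Ordering::SeqCst);
    assert_eq!(prev, 2);

    // atomic_cxchg: succeeds only if the current value equals the expected one
    let _ = x.compare_exchange(10, 42, Ordering::SeqCst, Ordering::SeqCst);

    // atomic_xadd / atomic_xsub: fetch_add / fetch_sub return the old value
    let old = x.fetch_add(5, Ordering::SeqCst);
    assert_eq!(old, 42);
    let _ = x.fetch_sub(1, Ordering::AcqRel);

    // atomic_and / atomic_nand / atomic_or / atomic_xor
    let _ = x.fetch_and(0b1111, Ordering::SeqCst);
    let _ = x.fetch_nand(0b0101, Ordering::SeqCst);
    let _ = x.fetch_or(0b0001, Ordering::SeqCst);
    let _ = x.fetch_xor(0b0010, Ordering::SeqCst);

    // atomic_max / atomic_min (signed); umax/umin map to AtomicUsize
    let _ = x.fetch_max(100, Ordering::SeqCst);
    let _ = x.fetch_min(-1, Ordering::SeqCst);
}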
/// The size of a type in bytes.
///