Do some atomics

khyperia 2020-09-07 15:26:34 +02:00
parent 81f868d4ec
commit 148f7ba163
2 changed files with 268 additions and 28 deletions


@@ -2,7 +2,7 @@ use super::Builder;
 use crate::builder_spirv::{BuilderCursor, SpirvValueExt};
 use crate::spirv_type::SpirvType;
 use rspirv::dr::Operand;
-use rspirv::spirv::StorageClass;
+use rspirv::spirv::{MemorySemantics, Scope, StorageClass};
 use rustc_codegen_ssa::common::{
     AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope,
 };
@@ -64,6 +64,20 @@ macro_rules! simple_uni_op {
     };
 }
 
+fn ordering_to_semantics(ordering: AtomicOrdering) -> MemorySemantics {
+    use AtomicOrdering::*;
+    // TODO: Someone verify/fix this, I don't know atomics well
+    match ordering {
+        NotAtomic => MemorySemantics::NONE,
+        Unordered => MemorySemantics::NONE,
+        Monotonic => MemorySemantics::NONE,
+        Acquire => MemorySemantics::ACQUIRE,
+        Release => MemorySemantics::RELEASE,
+        AcquireRelease => MemorySemantics::ACQUIRE_RELEASE,
+        SequentiallyConsistent => MemorySemantics::SEQUENTIALLY_CONSISTENT,
+    }
+}
+
 impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
     fn with_cx(cx: &'a Self::CodegenCx) -> Self {
         // Note: all defaults here *must* be filled out by position_at_end
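
Two notes on this mapping, hedged the same way the TODO is: the values match the SPIR-V ordering bits exactly (Relaxed/Monotonic really is "no bits set"), but nothing here ORs in the storage-class visibility bits (UNIFORM_MEMORY, WORKGROUP_MEMORY, ...) that SPIR-V memory semantics usually carry alongside the ordering, which may be exactly what the TODO needs verified. A minimal host-side check of the bit values, assuming rspirv's standard MemorySemantics bitflags:

    use rspirv::spirv::MemorySemantics;

    fn main() {
        assert_eq!(MemorySemantics::NONE.bits(), 0x0); // Relaxed
        assert_eq!(MemorySemantics::ACQUIRE.bits(), 0x2);
        assert_eq!(MemorySemantics::RELEASE.bits(), 0x4);
        assert_eq!(MemorySemantics::ACQUIRE_RELEASE.bits(), 0x8);
        assert_eq!(MemorySemantics::SEQUENTIALLY_CONSISTENT.bits(), 0x10);
    }
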
@@ -341,13 +355,24 @@ impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
         self.load(ptr, Align::from_bytes(0).unwrap())
     }
 
-    fn atomic_load(
-        &mut self,
-        _ptr: Self::Value,
-        _order: AtomicOrdering,
-        _size: Size,
-    ) -> Self::Value {
-        panic!("TODO: atomic_load not supported yet")
+    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, _size: Size) -> Self::Value {
+        let ty = match self.lookup_type(ptr.ty) {
+            SpirvType::Pointer {
+                storage_class: _,
+                pointee,
+            } => pointee,
+            ty => panic!(
+                "atomic_load called on variable that wasn't a pointer: {:?}",
+                ty
+            ),
+        };
+        // TODO: Default to device scope
+        let memory = self.constant_u32(Scope::Device as u32);
+        let semantics = self.constant_u32(ordering_to_semantics(order).bits());
+        self.emit()
+            .atomic_load(ty, None, ptr.def, memory.def, semantics.def)
+            .unwrap()
+            .with_type(ty)
     }
 
     fn load_operand(
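
The two extra operands every atomic instruction takes here are ordinary integer constants: a Scope id and a MemorySemantics mask. A SeqCst u32 load from the method above therefore comes out roughly as OpAtomicLoad %uint %ptr %scope %semantics, where %scope is OpConstant %uint 1 (Device) and %semantics is OpConstant %uint 16. A sketch of the Scope numbering this relies on, assuming rspirv's standard enum values:

    use rspirv::spirv::Scope;

    fn main() {
        assert_eq!(Scope::CrossDevice as u32, 0);
        assert_eq!(Scope::Device as u32, 1); // the hardcoded default above
        assert_eq!(Scope::Workgroup as u32, 2);
        assert_eq!(Scope::Subgroup as u32, 3);
        assert_eq!(Scope::Invocation as u32, 4);
    }
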
@@ -434,12 +459,28 @@ impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
     fn atomic_store(
         &mut self,
-        _val: Self::Value,
-        _ptr: Self::Value,
-        _order: AtomicOrdering,
+        val: Self::Value,
+        ptr: Self::Value,
+        order: AtomicOrdering,
         _size: Size,
     ) {
-        panic!("TODO: atomic_store not supported yet")
+        let ptr_elem_ty = match self.lookup_type(ptr.ty) {
+            SpirvType::Pointer {
+                storage_class: _,
+                pointee,
+            } => pointee,
+            ty => panic!(
+                "atomic_store called on variable that wasn't a pointer: {:?}",
+                ty
+            ),
+        };
+        assert_ty_eq!(self, ptr_elem_ty, val.ty);
+        // TODO: Default to device scope
+        let memory = self.constant_u32(Scope::Device as u32);
+        let semantics = self.constant_u32(ordering_to_semantics(order).bits());
+        self.emit()
+            .atomic_store(ptr.def, memory.def, semantics.def, val.def)
+            .unwrap()
     }
 
     fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value {
@@ -951,28 +992,91 @@ impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
     fn atomic_cmpxchg(
         &mut self,
-        _dst: Self::Value,
-        _cmp: Self::Value,
-        _src: Self::Value,
-        _order: AtomicOrdering,
-        _failure_order: AtomicOrdering,
+        dst: Self::Value,
+        cmp: Self::Value,
+        src: Self::Value,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
         _weak: bool,
     ) -> Self::Value {
-        todo!()
+        let dst_pointee_ty = match self.lookup_type(dst.ty) {
+            SpirvType::Pointer {
+                storage_class: _,
+                pointee,
+            } => pointee,
+            ty => panic!(
+                "atomic_cmpxchg called on variable that wasn't a pointer: {:?}",
+                ty
+            ),
+        };
+        assert_ty_eq!(self, dst_pointee_ty, cmp.ty);
+        assert_ty_eq!(self, dst_pointee_ty, src.ty);
+        // TODO: Default to device scope
+        let memory = self.constant_u32(Scope::Device as u32);
+        let semantics_equal = self.constant_u32(ordering_to_semantics(order).bits());
+        let semantics_unequal = self.constant_u32(ordering_to_semantics(failure_order).bits());
+        // Note: OpAtomicCompareExchangeWeak is deprecated, and has the same semantics
+        self.emit()
+            .atomic_compare_exchange(
+                src.ty,
+                None,
+                dst.def,
+                memory.def,
+                semantics_equal.def,
+                semantics_unequal.def,
+                src.def,
+                cmp.def,
+            )
+            .unwrap()
+            .with_type(src.ty)
     }
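
One caveat worth flagging here: rustc's callers treat this return value as a (previous value, success flag) pair — the intrinsic handler in the second file immediately calls extract_value(pair, 0) and extract_value(pair, 1) on it — whereas OpAtomicCompareExchange returns only the previous value, so the scalar returned above doesn't yet satisfy that contract (a success flag would have to be recomputed, e.g. by comparing the result against cmp). For reference, the pair shape being modeled, in plain host-side Rust:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn main() {
        let x = AtomicU32::new(5);
        // compare_exchange yields the previous value plus a success indicator:
        assert_eq!(x.compare_exchange(5, 9, Ordering::SeqCst, Ordering::Relaxed), Ok(5));
        assert_eq!(x.compare_exchange(5, 7, Ordering::SeqCst, Ordering::Relaxed), Err(9));
        assert_eq!(x.load(Ordering::Relaxed), 9);
    }
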
 
     fn atomic_rmw(
         &mut self,
-        _op: AtomicRmwBinOp,
-        _dst: Self::Value,
-        _src: Self::Value,
-        _order: AtomicOrdering,
+        op: AtomicRmwBinOp,
+        dst: Self::Value,
+        src: Self::Value,
+        order: AtomicOrdering,
     ) -> Self::Value {
-        todo!()
+        let dst_pointee_ty = match self.lookup_type(dst.ty) {
+            SpirvType::Pointer {
+                storage_class: _,
+                pointee,
+            } => pointee,
+            ty => panic!(
+                "atomic_rmw called on variable that wasn't a pointer: {:?}",
+                ty
+            ),
+        };
+        assert_ty_eq!(self, dst_pointee_ty, src.ty);
+        // TODO: Default to device scope
+        let memory = self.constant_u32(Scope::Device as u32).def;
+        let semantics = self.constant_u32(ordering_to_semantics(order).bits()).def;
+        let mut emit = self.emit();
+        use AtomicRmwBinOp::*;
+        match op {
+            AtomicXchg => emit.atomic_exchange(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicAdd => emit.atomic_i_add(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicSub => emit.atomic_i_sub(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicAnd => emit.atomic_and(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicNand => panic!("atomic nand is not supported"),
+            AtomicOr => emit.atomic_or(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicXor => emit.atomic_xor(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicMax => emit.atomic_s_max(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicMin => emit.atomic_s_min(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicUMax => emit.atomic_u_max(src.ty, None, dst.def, memory, semantics, src.def),
+            AtomicUMin => emit.atomic_u_min(src.ty, None, dst.def, memory, semantics, src.def),
+        }
+        .unwrap()
+        .with_type(src.ty)
     }
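
To make the path concrete end to end, a sketch (not from this commit) of source that exercises atomic_rmw: fetch_add with SeqCst reaches the backend as the "atomic_xadd" intrinsic, which the handler in the second file maps to AtomicRmwBinOp::AtomicAdd, which the match above emits as OpAtomicIAdd with the Device-scope and SEQUENTIALLY_CONSISTENT constants:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn bump(counter: &AtomicU32) -> u32 {
        // Like OpAtomicIAdd, fetch_add returns the previous value.
        counter.fetch_add(1, Ordering::SeqCst)
    }

    fn main() {
        let c = AtomicU32::new(0);
        assert_eq!(bump(&c), 0);
        assert_eq!(c.load(Ordering::Relaxed), 1);
    }
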
 
-    fn atomic_fence(&mut self, _order: AtomicOrdering, _scope: SynchronizationScope) {
-        todo!()
+    fn atomic_fence(&mut self, order: AtomicOrdering, _scope: SynchronizationScope) {
+        // Ignore sync scope (it only has "single thread" and "cross thread")
+        // TODO: Default to device scope
+        let memory = self.constant_u32(Scope::Device as u32).def;
+        let semantics = self.constant_u32(ordering_to_semantics(order).bits()).def;
+        self.emit().memory_barrier(memory, semantics).unwrap();
     }
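
Ignoring the scope does lose information: a SingleThread fence arguably wants Scope::Invocation rather than Device (an alternative, not something this commit does), since a Device-scope OpMemoryBarrier is far stronger than a compiler-only fence needs. For reference, the source form that reaches this method:

    use std::sync::atomic::{fence, Ordering};

    fn main() {
        // Becomes the "atomic_fence" intrinsic, then OpMemoryBarrier.
        fence(Ordering::SeqCst);
    }
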
     fn set_invariant_load(&mut self, _load: Self::Value) {


@@ -14,7 +14,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::{
     AbiBuilderMethods, ArgAbiMethods, AsmBuilderMethods, BackendTypes, BaseTypeMethods,
     BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, DebugInfoBuilderMethods, HasCodegen,
-    InlineAsmOperandRef, IntrinsicCallMethods, OverflowOp, StaticBuilderMethods,
+    InlineAsmOperandRef, IntrinsicCallMethods, MiscMethods, OverflowOp, StaticBuilderMethods,
 };
 use rustc_codegen_ssa::MemFlags;
 use rustc_hir::LlvmInlineAsmInner;
@@ -367,9 +367,8 @@ impl<'a, 'spv, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'spv, 'tcx> {
         let arg_tys = sig.inputs();
         let ret_ty = sig.output();
         let name = self.tcx.item_name(def_id);
-        // let name_str = &*name.as_str();
+        let name_str = &*name.as_str();
         // let spirv_ret_ty = self.trans_type(self.layout_of(ret_ty));
         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
         let value = match name {
@@ -453,6 +452,143 @@ impl<'a, 'spv, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'spv, 'tcx> {
                 args[1].val.unaligned_volatile_store(self, dst);
                 return;
             }
+            // This requires that atomic intrinsics follow a specific naming pattern:
+            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
+            name if name_str.starts_with("atomic_") => {
+                use rustc_codegen_ssa::common::AtomicOrdering::*;
+                use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};
+                let split: Vec<&str> = name_str.split('_').collect();
+                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
+                let (order, failorder) = match split.len() {
+                    2 => (SequentiallyConsistent, SequentiallyConsistent),
+                    3 => match split[2] {
+                        "unordered" => (Unordered, Unordered),
+                        "relaxed" => (Monotonic, Monotonic),
+                        "acq" => (Acquire, Acquire),
+                        "rel" => (Release, Monotonic),
+                        "acqrel" => (AcquireRelease, Acquire),
+                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
+                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
+                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
+                    },
+                    4 => match (split[2], split[3]) {
+                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
+                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
+                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
+                    },
+                    _ => self.sess().fatal("Atomic intrinsic not in correct format"),
+                };
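
A few worked examples of that parse, traced through the match above (the names are hypothetical inputs, not an exhaustive list):

    "atomic_xadd"                     -> ("xadd",  SequentiallyConsistent, SequentiallyConsistent)
    "atomic_load_acq"                 -> ("load",  Acquire, Acquire)
    "atomic_cxchg_failacq"            -> ("cxchg", SequentiallyConsistent, Acquire)
    "atomic_cxchg_acqrel_failrelaxed" -> ("cxchg", AcquireRelease, Monotonic)
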
+                let invalid_monomorphization = |ty| {
+                    span_invalid_monomorphization_error(
+                        self.tcx.sess,
+                        span,
+                        &format!(
+                            "invalid monomorphization of `{}` intrinsic: \
+                             expected basic integer type, found `{}`",
+                            name, ty
+                        ),
+                    );
+                };
+                match split[1] {
+                    "cxchg" | "cxchgweak" => {
+                        let ty = substs.type_at(0);
+                        if int_type_width_signed(ty, self).is_some() {
+                            let weak = split[1] == "cxchgweak";
+                            let pair = self.atomic_cmpxchg(
+                                args[0].immediate(),
+                                args[1].immediate(),
+                                args[2].immediate(),
+                                order,
+                                failorder,
+                                weak,
+                            );
+                            let val = self.extract_value(pair, 0);
+                            let success = self.extract_value(pair, 1);
+                            let success = self.zext(success, SpirvType::Bool.def(self));
+                            let dest = result.project_field(self, 0);
+                            self.store(val, dest.llval, dest.align);
+                            let dest = result.project_field(self, 1);
+                            self.store(success, dest.llval, dest.align);
+                            return;
+                        } else {
+                            return invalid_monomorphization(ty);
+                        }
+                    }
+                    "load" => {
+                        let ty = substs.type_at(0);
+                        if int_type_width_signed(ty, self).is_some() {
+                            let size = self.size_of(ty);
+                            self.atomic_load(args[0].immediate(), order, size)
+                        } else {
+                            return invalid_monomorphization(ty);
+                        }
+                    }
+                    "store" => {
+                        let ty = substs.type_at(0);
+                        if int_type_width_signed(ty, self).is_some() {
+                            let size = self.size_of(ty);
+                            self.atomic_store(
+                                args[1].immediate(),
+                                args[0].immediate(),
+                                order,
+                                size,
+                            );
+                            return;
+                        } else {
+                            return invalid_monomorphization(ty);
+                        }
+                    }
+                    "fence" => {
+                        self.atomic_fence(order, SynchronizationScope::CrossThread);
+                        return;
+                    }
+                    "singlethreadfence" => {
+                        self.atomic_fence(order, SynchronizationScope::SingleThread);
+                        return;
+                    }
+                    // These are all AtomicRMW ops
+                    op => {
+                        let atom_op = match op {
+                            "xchg" => AtomicRmwBinOp::AtomicXchg,
+                            "xadd" => AtomicRmwBinOp::AtomicAdd,
+                            "xsub" => AtomicRmwBinOp::AtomicSub,
+                            "and" => AtomicRmwBinOp::AtomicAnd,
+                            "nand" => AtomicRmwBinOp::AtomicNand,
+                            "or" => AtomicRmwBinOp::AtomicOr,
+                            "xor" => AtomicRmwBinOp::AtomicXor,
+                            "max" => AtomicRmwBinOp::AtomicMax,
+                            "min" => AtomicRmwBinOp::AtomicMin,
+                            "umax" => AtomicRmwBinOp::AtomicUMax,
+                            "umin" => AtomicRmwBinOp::AtomicUMin,
+                            _ => self.sess().fatal("unknown atomic operation"),
+                        };
+                        let ty = substs.type_at(0);
+                        if int_type_width_signed(ty, self).is_some() {
+                            self.atomic_rmw(
+                                atom_op,
+                                args[0].immediate(),
+                                args[1].immediate(),
+                                order,
+                            )
+                        } else {
+                            return invalid_monomorphization(ty);
+                        }
+                    }
+                }
+            }
             sym::prefetch_read_data
             | sym::prefetch_write_data
             | sym::prefetch_read_instruction