Transferred memcpy and memset to BuilderMethods

Denis Merigoux 2018-09-10 17:59:20 +02:00 committed by Eduard-Mihai Burtescu
parent 3c082a23e8
commit a5aeb8edd6
8 changed files with 112 additions and 101 deletions
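For orientation, below is a minimal standalone sketch of the pattern this commit introduces: the flag handling that the free helpers in base.rs used to do at every call site moves into the builder's own memcpy method, so call sites just pass an Align and MemFlags. The types here are simplified stand-ins, not the real rustc_codegen_llvm definitions.

// Standalone sketch: MemFlags, Align, and Builder below are simplified
// stand-ins, not the actual rustc_codegen_llvm types.
#[derive(Clone, Copy)]
struct MemFlags(u8);

impl MemFlags {
    const VOLATILE: MemFlags = MemFlags(1);
    const NONTEMPORAL: MemFlags = MemFlags(2);
    fn empty() -> MemFlags { MemFlags(0) }
    fn contains(self, other: MemFlags) -> bool { self.0 & other.0 == other.0 }
}

#[derive(Clone, Copy)]
struct Align(u64);

struct Builder;

impl Builder {
    // The flag interpretation that base::call_memcpy used to do at every
    // call site now lives inside the builder method itself.
    fn memcpy(&self, dst: *mut u8, _dst_align: Align,
              src: *const u8, _src_align: Align,
              size: usize, flags: MemFlags) {
        let _volatile = flags.contains(MemFlags::VOLATILE);
        let _nontemporal = flags.contains(MemFlags::NONTEMPORAL);
        // Stand-in for emitting the LLVM memcpy intrinsic.
        unsafe { std::ptr::copy_nonoverlapping(src, dst, size) };
    }
}

fn main() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    let bx = Builder;
    // Call sites now pass Align and MemFlags straight to the builder method.
    bx.memcpy(dst.as_mut_ptr(), Align(1), src.as_ptr(), Align(1),
              src.len(), MemFlags::empty());
    assert_eq!(dst, src);
}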


@@ -9,7 +9,6 @@
// except according to those terms.
use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use context::CodegenCx;
use mir::place::PlaceRef;
@@ -239,13 +238,14 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
bx.store(val, llscratch, scratch_align);
// ...and then memcpy it to the intended destination.
base::call_memcpy(bx,
bx.pointercast(dst.llval, cx.type_i8p()),
self.layout.align,
bx.pointercast(llscratch, cx.type_i8p()),
scratch_align,
cx.const_usize(self.layout.size.bytes()),
MemFlags::empty());
bx.memcpy(
bx.pointercast(dst.llval, cx.type_i8p()),
self.layout.align,
bx.pointercast(llscratch, cx.type_i8p()),
scratch_align,
cx.const_usize(self.layout.size.bytes()),
MemFlags::empty()
);
bx.lifetime_end(llscratch, scratch_size);
}


@@ -433,30 +433,6 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
val
}
pub fn call_memcpy<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx: &Builder,
dst: Builder::Value,
dst_align: Align,
src: Builder::Value,
src_align: Align,
n_bytes: Builder::Value,
flags: MemFlags,
) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let val = bx.load(src, src_align);
let ptr = bx.pointercast(dst, bx.cx().type_ptr_to(bx.cx().val_ty(val)));
bx.store_with_flags(val, ptr, dst_align, flags);
return;
}
let cx = bx.cx();
let src_ptr = bx.pointercast(src, cx.type_i8p());
let dst_ptr = bx.pointercast(dst, cx.type_i8p());
let size = bx.intcast(n_bytes, cx.type_isize(), false);
let volatile = flags.contains(MemFlags::VOLATILE);
bx.memcpy(dst_ptr, dst_align.abi(), src_ptr, src_align.abi(), size, volatile);
}
pub fn memcpy_ty<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx: &Builder,
dst: Builder::Value,
@@ -471,22 +447,7 @@ pub fn memcpy_ty<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
return;
}
call_memcpy(bx, dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn call_memset<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx: &Builder,
ptr: Builder::Value,
fill_byte: Builder::Value,
size: Builder::Value,
align: Builder::Value,
volatile: bool,
) -> Builder::Value {
let ptr_width = &bx.sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key);
let volatile = bx.cx().const_bool(volatile);
bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {


@@ -785,22 +785,61 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn memcpy(&self, dst: &'ll Value, dst_align: u64,
src: &'ll Value, src_align: u64,
size: &'ll Value, is_volatile: bool) -> &'ll Value {
fn memcpy(&self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let val = self.load(src, src_align);
let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
self.store_with_flags(val, ptr, dst_align, flags);
return;
}
let size = self.intcast(size, self.cx().type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align as c_uint,
src, src_align as c_uint, size, is_volatile)
llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint,
src, src_align.abi() as c_uint, size, is_volatile);
}
}
fn memmove(&self, dst: &'ll Value, dst_align: u64,
src: &'ll Value, src_align: u64,
size: &'ll Value, is_volatile: bool) -> &'ll Value {
unsafe {
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align as c_uint,
src, src_align as c_uint, size, is_volatile)
fn memmove(&self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memmove.
let val = self.load(src, src_align);
let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
self.store_with_flags(val, ptr, dst_align, flags);
return;
}
let size = self.intcast(size, self.cx().type_isize(), false);
let is_volatile = flags.contains(MemFlags::VOLATILE);
let dst = self.pointercast(dst, self.cx().type_i8p());
let src = self.pointercast(src, self.cx().type_i8p());
unsafe {
llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint,
src, src_align.abi() as c_uint, size, is_volatile);
}
}
fn memset(
&self,
ptr: &'ll Value,
fill_byte: &'ll Value,
size: &'ll Value,
align: Align,
flags: MemFlags,
) {
let ptr_width = &self.sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.cx().type_i8p());
let align = self.cx().const_u32(align.abi() as u32);
let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
}
fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {


@@ -170,12 +170,20 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: Backend {
) -> Option<Self::Value>;
fn memcpy(&self, dst: Self::Value, dst_align: u64,
src: Self::Value, src_align: u64,
size: Self::Value, is_volatile: bool) -> Self::Value;
fn memmove(&self, dst: Self::Value, dst_align: u64,
src: Self::Value, src_align: u64,
size: Self::Value, is_volatile: bool) -> Self::Value;
fn memcpy(&self, dst: Self::Value, dst_align: Align,
src: Self::Value, src_align: Align,
size: Self::Value, flags: MemFlags);
fn memmove(&self, dst: Self::Value, dst_align: Align,
src: Self::Value, src_align: Align,
size: Self::Value, flags: MemFlags);
fn memset(
&self,
ptr: Self::Value,
fill_byte: Self::Value,
size: Self::Value,
align: Align,
flags: MemFlags,
);
fn minnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
fn maxnum(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

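As a usage illustration: with these trait methods in place, backend-generic helpers such as memcpy_ty in base.rs above only need a BuilderMethods bound rather than an LLVM-specific free function. The sketch below uses simplified stand-in types, and const_usize is hoisted onto the builder purely to keep the example self-contained (in the real interface it lives on the codegen context).

#[derive(Clone, Copy)]
struct Align(u64);

#[derive(Clone, Copy)]
struct MemFlags(u8);

impl MemFlags {
    fn empty() -> MemFlags { MemFlags(0) }
}

// Simplified stand-in for the trait methods above.
trait BuilderMethods {
    type Value: Copy;
    fn memcpy(&self, dst: Self::Value, dst_align: Align,
              src: Self::Value, src_align: Align,
              size: Self::Value, flags: MemFlags);
    fn const_usize(&self, n: u64) -> Self::Value;
}

// Backend-generic helper in the spirit of memcpy_ty: written against the
// trait only, with no reference to base::call_memcpy or LLVM.
fn copy_bytes<Bx: BuilderMethods>(bx: &Bx, dst: Bx::Value, dst_align: Align,
                                  src: Bx::Value, src_align: Align,
                                  size: u64, flags: MemFlags) {
    if size == 0 {
        return;
    }
    bx.memcpy(dst, dst_align, src, src_align, bx.const_usize(size), flags);
}

// Trivial implementation so the sketch runs; a real backend would emit IR here.
struct NoopBuilder;

impl BuilderMethods for NoopBuilder {
    type Value = u64;
    fn memcpy(&self, _dst: u64, _dst_align: Align, _src: u64, _src_align: Align,
              _size: u64, _flags: MemFlags) {}
    fn const_usize(&self, n: u64) -> u64 { n }
}

fn main() {
    let bx = NoopBuilder;
    copy_bytes(&bx, 0, Align(8), 1, Align(8), 16, MemFlags::empty());
}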

@@ -29,7 +29,7 @@ use rustc::ty::layout::LayoutOf;
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;
use builder::{Builder, MemFlags};
use value::Value;
use interfaces::{
@@ -228,28 +228,34 @@ pub fn codegen_intrinsic_call(
"copy_nonoverlapping" => {
copy_intrinsic(bx, false, false, substs.type_at(0),
args[1].immediate(), args[0].immediate(), args[2].immediate())
args[1].immediate(), args[0].immediate(), args[2].immediate());
return;
}
"copy" => {
copy_intrinsic(bx, true, false, substs.type_at(0),
args[1].immediate(), args[0].immediate(), args[2].immediate())
args[1].immediate(), args[0].immediate(), args[2].immediate());
return;
}
"write_bytes" => {
memset_intrinsic(bx, false, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
args[0].immediate(), args[1].immediate(), args[2].immediate());
return;
}
"volatile_copy_nonoverlapping_memory" => {
copy_intrinsic(bx, false, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
args[0].immediate(), args[1].immediate(), args[2].immediate());
return;
}
"volatile_copy_memory" => {
copy_intrinsic(bx, true, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
args[0].immediate(), args[1].immediate(), args[2].immediate());
return;
}
"volatile_set_memory" => {
memset_intrinsic(bx, true, substs.type_at(0),
args[0].immediate(), args[1].immediate(), args[2].immediate())
args[0].immediate(), args[1].immediate(), args[2].immediate());
return;
}
"volatile_load" | "unaligned_volatile_load" => {
let tp_ty = substs.type_at(0);
@@ -725,17 +731,18 @@ fn copy_intrinsic(
dst: &'ll Value,
src: &'ll Value,
count: &'ll Value,
) -> &'ll Value {
let cx = bx.cx();
let (size, align) = cx.size_and_align_of(ty);
let size = cx.const_usize(size.bytes());
let align = align.abi();
let dst_ptr = bx.pointercast(dst, cx.type_i8p());
let src_ptr = bx.pointercast(src, cx.type_i8p());
if allow_overlap {
bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
) {
let (size, align) = bx.cx().size_and_align_of(ty);
let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
let flags = if volatile {
MemFlags::VOLATILE
} else {
bx.memcpy(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
MemFlags::empty()
};
if allow_overlap {
bx.memmove(dst, align, src, align, size, flags);
} else {
bx.memcpy(dst, align, src, align, size, flags);
}
}
@@ -746,13 +753,15 @@ fn memset_intrinsic(
dst: &'ll Value,
val: &'ll Value,
count: &'ll Value
) -> &'ll Value {
let cx = bx.cx();
let (size, align) = cx.size_and_align_of(ty);
let size = cx.const_usize(size.bytes());
let align = cx.const_i32(align.abi() as i32);
let dst = bx.pointercast(dst, cx.type_i8p());
call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
) {
let (size, align) = bx.cx().size_and_align_of(ty);
let size = bx.cx().const_usize(size.bytes());
let flags = if volatile {
MemFlags::VOLATILE
} else {
MemFlags::empty()
};
bx.memset(dst, val, bx.mul(size, count), align, flags);
}
fn try_intrinsic(


@@ -349,7 +349,7 @@ impl OperandValue<&'ll Value> {
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align);
base::call_memcpy(bx, lldst, max_align, llptr, min_align, llsize, flags);
bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
// Store the allocated region and the extra to the indirect place.
let indirect_operand = OperandValue::Pair(lldst, llextra);


@@ -14,7 +14,7 @@ use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use builder::{Builder, MemFlags};
use common::{CodegenCx, IntPredicate};
use type_of::LayoutLlvmExt;
use value::Value;
@@ -381,15 +381,10 @@ impl PlaceRef<'tcx, &'ll Value> {
bx.sess().target.target.arch == "aarch64" {
// Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let llptr = bx.pointercast(
self.llval,
bx.cx().type_ptr_to(bx.cx().type_i8())
);
let fill_byte = bx.cx().const_u8(0);
let (size, align) = self.layout.size_and_align();
let size = bx.cx().const_usize(size.bytes());
let align = bx.cx().const_u32(align.abi() as u32);
base::call_memset(bx, llptr, fill_byte, size, align, false);
bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
}
let niche = self.project_field(bx, 0);


@@ -17,7 +17,7 @@ use rustc_apfloat::{ieee, Float, Status, Round};
use std::{u128, i128};
use base;
use builder::Builder;
use builder::{Builder, MemFlags};
use callee;
use common::{self, IntPredicate, RealPredicate};
use monomorphize;
@@ -104,20 +104,19 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
if let OperandValue::Immediate(v) = cg_elem.val {
let align = bx.cx().const_i32(dest.align.abi() as i32);
let size = bx.cx().const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
let fill = bx.cx().const_u8(0);
base::call_memset(&bx, start, fill, size, align, false);
bx.memset(start, fill, size, dest.align, MemFlags::empty());
return bx;
}
// Use llvm.memset.p0i8.* to initialize byte arrays
let v = base::from_immediate(&bx, v);
if bx.cx().val_ty(v) == bx.cx().type_i8() {
base::call_memset(&bx, start, v, size, align, false);
bx.memset(start, v, size, dest.align, MemFlags::empty());
return bx;
}
}