Make `PlaceRef` hold a `PlaceValue` for the non-layout fields (like `OperandRef` does)

commit 89502e584b
parent c2239bca5b
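The change below is mechanical at almost every call site: `place.llval`, `place.llextra`, and `place.align` become `place.val.llval`, `place.val.llextra`, and `place.val.align`, with the new `PlaceValue` struct defined in `place.rs` further down. A minimal self-contained sketch of the shape of the refactor (the type aliases here are simplified stand-ins; the real code is generic over the backend value type `V` and uses `TyAndLayout<'tcx>` and `rustc_target::abi::Align`):

    // Before: PlaceRef inlined the runtime fields next to the layout.
    // After: the runtime fields live in PlaceValue, mirroring OperandRef's
    // existing val/layout split.
    type Value = usize; // stand-in for the backend value type `V`
    type Align = u64;   // stand-in for rustc_target::abi::Align
    #[derive(Clone, Copy)]
    struct Layout;      // stand-in for TyAndLayout<'tcx>

    #[derive(Clone, Copy)]
    struct PlaceValue {
        llval: Value,          // pointer to the contents of the place
        llextra: Option<Value>, // unsized metadata, if any
        align: Align,          // known alignment of the place
    }

    #[derive(Clone, Copy)]
    struct PlaceRef {
        val: PlaceValue, // was: llval/llextra/align inlined here
        layout: Layout,
    }

    fn main() {
        let val = PlaceValue { llval: 0x1000, llextra: None, align: 8 };
        let place = PlaceRef { val, layout: Layout };
        // Call sites change from `place.llval` to `place.val.llval`:
        assert_eq!(place.val.llval, 0x1000);
    }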
@@ -974,7 +974,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         &mut self,
         place: PlaceRef<'tcx, RValue<'gcc>>,
     ) -> OperandRef<'tcx, RValue<'gcc>> {
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());

         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -999,10 +999,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             }
         }

-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(llextra) = place.val.llextra {
+            OperandValue::Ref(place.val.llval, Some(llextra), place.val.align)
         } else if place.layout.is_gcc_immediate() {
-            let load = self.load(place.layout.gcc_type(self), place.llval, place.align);
+            let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                 scalar_load_metadata(self, load, scalar);
             }
@@ -1012,9 +1012,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {

             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                 let load = self.load(llty, llptr, align);
@@ -1027,11 +1027,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             };

             OperandValue::Pair(
-                load(0, a, place.align),
-                load(1, b, place.align.restrict_for_offset(b_offset)),
+                load(0, a, place.val.align),
+                load(1, b, place.val.align.restrict_for_offset(b_offset)),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val.llval, None, place.val.align)
         };

         OperandRef { val, layout: place.layout }
@@ -1045,8 +1045,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(self, zero).llval;
-        let end = dest.project_index(self, count).llval;
+        let start = dest.project_index(self, zero).val.llval;
+        let end = dest.project_index(self, count).val.llval;

         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -1064,7 +1064,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cond_br(keep_going, body_bb, next_bb);

         self.switch_to_block(body_bb);
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
         cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));

         let next = self.inbounds_gep(
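The scalar-pair branch above loads the two halves of the pair separately: the second half lives at `b_offset`, the size of the first scalar rounded up to the second scalar's alignment, and its load uses the place's alignment restricted for that offset. A self-contained sketch of that offset arithmetic (plain integers standing in for `Size` and `Align`):

    // Offset of field `b` in a scalar pair: round the end of `a` up to
    // `b`'s alignment -- what `a.size(bx).align_to(b.align(bx).abi)` computes.
    fn b_offset(a_size: u64, b_align: u64) -> u64 {
        (a_size + b_align - 1) / b_align * b_align
    }

    fn main() {
        assert_eq!(b_offset(1, 4), 4); // e.g. (u8, u32): `b` starts at byte 4
        assert_eq!(b_offset(4, 1), 4); // e.g. (u32, u8): `b` starts at byte 4
        assert_eq!(b_offset(8, 8), 8); // e.g. (u64, u64): `b` starts at byte 8
    }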
@@ -354,7 +354,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {

                 let block = self.llbb();
                 let extended_asm = block.add_extended_asm(None, "");
-                extended_asm.add_input_operand(None, "r", result.llval);
+                extended_asm.add_input_operand(None, "r", result.val.llval);
                 extended_asm.add_clobber("memory");
                 extended_asm.set_volatile_flag(true);

@@ -388,8 +388,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
-                let ptr = self.pointercast(result.llval, ptr_llty);
-                self.store(llval, ptr, result.align);
+                let ptr = self.pointercast(result.val.llval, ptr_llty);
+                self.store(llval, ptr, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -511,7 +511,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
                 let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
-                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+                let cast_dst = bx.pointercast(dst.val.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
@@ -539,7 +539,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {

                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
@@ -82,7 +82,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
                 let int_ty = bx.type_ix(expected_bytes * 8);
-                let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+                let ptr = bx.pointercast(place.val.llval, bx.cx.type_ptr_to(int_ty));
                 bx.load(int_ty, ptr, Align::ONE)
             }
             _ => return_error!(InvalidMonomorphization::InvalidBitmask {
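This bitmask path spills the mask argument into a stack slot and reloads it as a single `i(expected_bytes * 8)` integer, i.e. it reinterprets the lane bits as one packed integer. A self-contained sketch of the equivalent packing, using a hypothetical `bool`-lane mask in place of the backend value:

    // Pack boolean lanes into an integer, least-significant lane first --
    // the same reinterpretation the alloca + integer load performs above.
    fn pack_mask(lanes: &[bool]) -> u64 {
        lanes
            .iter()
            .enumerate()
            .fold(0u64, |acc, (i, &lane)| acc | ((lane as u64) << i))
    }

    fn main() {
        assert_eq!(pack_mask(&[true, false, true, true]), 0b1101);
    }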
@@ -233,7 +233,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 bx.store(val, llscratch, scratch_align);
                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
@@ -535,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 panic!("unsized locals must not be `extern` types");
             }
         }
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());

         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -579,13 +579,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }

-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(llextra) = place.val.llextra {
+            OperandValue::Ref(place.val.llval, Some(llextra), place.val.align)
         } else if place.layout.is_llvm_immediate() {
             let mut const_llval = None;
             let llty = place.layout.llvm_type(self);
             unsafe {
-                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                     if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                         if let Some(init) = llvm::LLVMGetInitializer(global) {
                             if self.val_ty(init) == llty {
@@ -596,7 +596,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(llty, place.llval, place.align);
+                let load = self.load(llty, place.val.llval, place.val.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
@@ -608,9 +608,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -619,11 +619,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             };

             OperandValue::Pair(
-                load(0, a, place.layout, place.align, Size::ZERO),
-                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+                load(0, a, place.layout, place.val.align, Size::ZERO),
+                load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val.llval, None, place.val.align)
         };

         OperandRef { val, layout: place.layout }
@@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     llvm::LLVMSetAlignment(load, align);
                 }
                 if !result.layout.is_zst() {
-                    self.store(load, result.llval, result.align);
+                    self.store(load, result.val.llval, result.val.align);
                 }
                 return Ok(());
             }
@@ -428,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {

             sym::black_box => {
                 args[0].val.store(self, result);
-                let result_val_span = [result.llval];
+                let result_val_span = [result.val.llval];
                 // We need to "use" the argument in some way LLVM can't introspect, and on
                 // targets that support it we can typically leverage inline assembly to do
                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -482,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {

         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.llval, result.align);
+                self.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -1065,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
                 let int_ty = bx.type_ix(expected_bytes * 8);
-                bx.load(int_ty, place.llval, Align::ONE)
+                bx.load(int_ty, place.val.llval, Align::ONE)
             }
             _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                 span,
@@ -1,6 +1,6 @@
 use super::operand::OperandRef;
 use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized};
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{CachedLlbb, FunctionCx, LocalRef};

 use crate::base;
@@ -242,7 +242,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 bx.switch_to_block(fx.llbb(target));
                 fx.set_debug_loc(bx, self.terminator.source_info);
                 for tmp in copied_constant_arguments {
-                    bx.lifetime_end(tmp.llval, tmp.layout.size);
+                    bx.lifetime_end(tmp.val.llval, tmp.layout.size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
             }
@@ -256,7 +256,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {

             if let Some((ret_dest, target)) = destination {
                 for tmp in copied_constant_arguments {
-                    bx.lifetime_end(tmp.llval, tmp.layout.size);
+                    bx.lifetime_end(tmp.val.llval, tmp.layout.size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                 self.funclet_br(fx, bx, target, mergeable_succ)
@@ -431,7 +431,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let va_list_arg_idx = self.fn_abi.args.len();
                 match self.locals[mir::Local::from_usize(1 + va_list_arg_idx)] {
                     LocalRef::Place(va_list) => {
-                        bx.va_end(va_list.llval);
+                        bx.va_end(va_list.val.llval);
                    }
                     _ => bug!("C-variadic function must have a `VaList` place"),
                 }
@@ -467,7 +467,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             LocalRef::Operand(op) => op,
             LocalRef::PendingOperand => bug!("use of return before def"),
             LocalRef::Place(cg_place) => OperandRef {
-                val: Ref(cg_place.llval, None, cg_place.align),
+                val: Ref(cg_place.val.llval, None, cg_place.val.align),
                 layout: cg_place.layout,
             },
             LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
@@ -476,7 +476,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             Immediate(_) | Pair(..) => {
                 let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
                 op.val.store(bx, scratch);
-                scratch.llval
+                scratch.val.llval
             }
             Ref(llval, _, align) => {
                 assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
@@ -512,11 +512,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

         let place = self.codegen_place(bx, location.as_ref());
         let (args1, args2);
-        let mut args = if let Some(llextra) = place.llextra {
-            args2 = [place.llval, llextra];
+        let mut args = if let Some(llextra) = place.val.llextra {
+            args2 = [place.val.llval, llextra];
             &args2[..]
         } else {
-            args1 = [place.llval];
+            args1 = [place.val.llval];
             &args1[..]
         };
         let (drop_fn, fn_abi, drop_instance) =
@@ -918,7 +918,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let dest = match ret_dest {
             _ if fn_abi.ret.is_indirect() => llargs[0],
             ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
-            ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+            ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.val.llval,
             ReturnDest::DirectOperand(_) => {
                 bug!("Cannot use direct operand with an intrinsic call")
             }
@@ -951,7 +951,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
             Ok(()) => {
                 if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.val.llval);
                 }

                 return if let Some(target) = target {
@@ -1058,16 +1058,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                     }
                     let place = op.deref(bx.cx());
-                    let data_ptr = place.project_field(bx, 0);
-                    let meta_ptr = place.project_field(bx, 1);
-                    let meta = bx.load_operand(meta_ptr);
+                    let data_place = place.project_field(bx, 0);
+                    let meta_place = place.project_field(bx, 1);
+                    let meta = bx.load_operand(meta_place);
                     llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                         bx,
                         meta.immediate(),
                         op.layout.ty,
                         fn_abi,
                     ));
-                    llargs.push(data_ptr.llval);
+                    llargs.push(data_place.val.llval);
                     continue;
                 }
                 _ => {
@@ -1082,9 +1082,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (&mir::Operand::Copy(_), Ref(_, None, _))
                 | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
                     let tmp = PlaceRef::alloca(bx, op.layout);
-                    bx.lifetime_start(tmp.llval, tmp.layout.size);
+                    bx.lifetime_start(tmp.val.llval, tmp.layout.size);
                     op.val.store(bx, tmp);
-                    op.val = Ref(tmp.llval, None, tmp.align);
+                    op.val = Ref(tmp.val.llval, None, tmp.val.align);
                     copied_constant_arguments.push(tmp);
                 }
                 _ => {}
@@ -1450,12 +1450,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 };
                 let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
                 op.val.store(bx, scratch);
-                (scratch.llval, scratch.align, true)
+                (scratch.val.llval, scratch.val.align, true)
             }
             PassMode::Cast { .. } => {
                 let scratch = PlaceRef::alloca(bx, arg.layout);
                 op.val.store(bx, scratch);
-                (scratch.llval, scratch.align, true)
+                (scratch.val.llval, scratch.val.align, true)
             }
             _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
         },
@@ -1470,9 +1470,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     // alignment requirements may be higher than the type's alignment, so copy
                     // to a higher-aligned alloca.
                     let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
-                    let op_place = PlaceRef { llval, llextra, layout: op.layout, align };
+                    let op_place = PlaceRef {
+                        val: PlaceValue { llval, llextra, align },
+                        layout: op.layout,
+                    };
                     bx.typed_place_copy(scratch, op_place);
-                    (scratch.llval, scratch.align, true)
+                    (scratch.val.llval, scratch.val.align, true)
                 } else {
                     (llval, align, true)
                 }
@@ -1490,7 +1493,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // a pointer for `repr(C)` structs even when empty, so get
                 // one from an `alloca` (which can be left uninitialized).
                 let scratch = PlaceRef::alloca(bx, arg.layout);
-                (scratch.llval, scratch.align, true)
+                (scratch.val.llval, scratch.val.align, true)
             }
             _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"),
         },
@@ -1782,7 +1785,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // but the calling convention has an indirect return.
             let tmp = PlaceRef::alloca(bx, fn_ret.layout);
             tmp.storage_live(bx);
-            llargs.push(tmp.llval);
+            llargs.push(tmp.val.llval);
             ReturnDest::IndirectOperand(tmp, index)
         } else if intrinsic.is_some() {
             // Currently, intrinsics always need a location to store
@@ -1803,7 +1806,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             self.codegen_place(bx, mir::PlaceRef { local: dest.local, projection: dest.projection })
         };
         if fn_ret.is_indirect() {
-            if dest.align < dest.layout.align.abi {
+            if dest.val.align < dest.layout.align.abi {
                 // Currently, MIR code generation does not create calls
                 // that store directly to fields of packed structs (in
                 // fact, the calls it creates write only to temps).
@@ -1812,7 +1815,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // to create a temporary.
                 span_bug!(self.mir.span, "can't directly store to unaligned value");
             }
-            llargs.push(dest.llval);
+            llargs.push(dest.val.llval);
             ReturnDest::Nothing
         } else {
             ReturnDest::Store(dest)
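In the drop-call hunk above (`@@ -512`), the callee receives one argument for a sized place but two for an unsized one, whose metadata rides along in `llextra`. A self-contained sketch of that argument-shape decision, generic over a stand-in for the backend value type:

    // Build the argument list for a drop call: sized places pass just the
    // data pointer; unsized places also pass their metadata (length/vtable).
    fn drop_args<V: Copy>(llval: V, llextra: Option<V>) -> Vec<V> {
        match llextra {
            Some(extra) => vec![llval, extra],
            None => vec![llval],
        }
    }

    fn main() {
        assert_eq!(drop_args(0xA, None).len(), 1);      // e.g. Box<u32>
        assert_eq!(drop_args(0xA, Some(0xB)).len(), 2); // e.g. Box<[u32]>
    }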
@@ -252,7 +252,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // at least for the cases which LLVM handles correctly.
         let spill_slot = PlaceRef::alloca(bx, operand.layout);
         if let Some(name) = name {
-            bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+            bx.set_var_name(spill_slot.val.llval, &(name + ".dbg.spill"));
         }
         operand.val.store(bx, spill_slot);
         spill_slot
@@ -331,7 +331,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if let Some(name) = &name {
             match local_ref {
                 LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
-                    bx.set_var_name(place.llval, name);
+                    bx.set_var_name(place.val.llval, name);
                 }
                 LocalRef::Operand(operand) => match operand.val {
                     OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
@@ -417,16 +417,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ptr_ty = Ty::new_mut_ptr(bx.tcx(), place.layout.ty);
                 let ptr_layout = bx.layout_of(ptr_ty);
                 let alloca = PlaceRef::alloca(bx, ptr_layout);
-                bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+                bx.set_var_name(alloca.val.llval, &(var.name.to_string() + ".dbg.spill"));

                 // Write the pointer to the variable
-                bx.store(place.llval, alloca.llval, alloca.align);
+                bx.store(place.val.llval, alloca.val.llval, alloca.val.align);

                 // Point the debug info to `*alloca` for the current variable
                 bx.dbg_var_addr(
                     dbg_var,
                     dbg_loc,
-                    alloca.llval,
+                    alloca.val.llval,
                     Size::ZERO,
                     &[Size::ZERO],
                     var.fragment,
@@ -435,7 +435,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.dbg_var_addr(
                     dbg_var,
                     dbg_loc,
-                    base.llval,
+                    base.val.llval,
                     direct_offset,
                     &indirect_offsets,
                     var.fragment,
@@ -553,7 +553,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let base =
                         Self::spill_operand_to_stack(operand, Some(var.name.to_string()), bx);

-                    bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], fragment);
+                    bx.dbg_var_addr(
+                        dbg_var,
+                        dbg_loc,
+                        base.val.llval,
+                        Size::ZERO,
+                        &[],
+                        fragment,
+                    );
                 }
             }
         }
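The `@@ -417` hunk above spills the *address* of the place into a fresh alloca and points the debug variable at `*alloca`, which is what the extra `&[Size::ZERO]` indirect offset expresses. A minimal sketch of that double indirection in plain Rust (variable names are illustrative only):

    // The spill slot stores the place's address; the debugger reads
    // through it, so the variable is found at `*spill_slot`.
    fn main() {
        let value: u32 = 42;                 // the variable's actual storage
        let spill_slot: *const u32 = &value; // alloca holding its address
        // Debug-info equivalent: base = spill_slot, one indirect offset of 0.
        assert_eq!(unsafe { *spill_slot }, 42);
    }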
@@ -387,9 +387,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let success = bx.from_immediate(success);

                     let dest = result.project_field(bx, 0);
-                    bx.store(val, dest.llval, dest.align);
+                    bx.store(val, dest.val.llval, dest.val.align);
                     let dest = result.project_field(bx, 1);
-                    bx.store(success, dest.llval, dest.align);
+                    bx.store(success, dest.val.llval, dest.val.align);
                 } else {
                     invalid_monomorphization(ty);
                 }
@@ -511,7 +511,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                bx.store(llval, result.llval, result.align);
+                bx.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                     .val
@@ -336,7 +336,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(

             if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
                 let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
-                bx.va_start(va_list.llval);
+                bx.va_start(va_list.val.llval);

                 return LocalRef::Place(va_list);
             }
@@ -1,4 +1,4 @@
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{FunctionCx, LocalRef};

 use crate::size_of_val;
@@ -221,7 +221,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             OperandValue::ZeroSized => bug!("Deref of ZST operand {:?}", self),
         };
         let layout = cx.layout_of(projected_ty);
-        PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+        let val = PlaceValue { llval: llptr, llextra, align: layout.align.abi };
+        PlaceRef { val, layout }
     }

     /// If this operand is a `Pair`, we return an aggregate with the two values.
@@ -409,10 +410,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                 // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
                 // value is through `undef`/`poison`, and the store itself is useless.
             }
-            OperandValue::Ref(llval, llextra @ None, source_align) => {
+            OperandValue::Ref(llval, None, source_align) => {
                 assert!(dest.layout.is_sized(), "cannot directly store unsized values");
-                let source_place =
-                    PlaceRef { llval, llextra, align: source_align, layout: dest.layout };
+                let source_place = PlaceRef {
+                    val: PlaceValue::new_sized(llval, source_align),
+                    layout: dest.layout,
+                };
                 bx.typed_place_copy_with_flags(dest, source_place, flags);
             }
             OperandValue::Ref(_, Some(_), _) => {
@@ -420,7 +423,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             }
             OperandValue::Immediate(s) => {
                 let val = bx.from_immediate(s);
-                bx.store_with_flags(val, dest.llval, dest.align, flags);
+                bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
             }
             OperandValue::Pair(a, b) => {
                 let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
@@ -429,12 +432,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                 let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);

                 let val = bx.from_immediate(a);
-                let align = dest.align;
-                bx.store_with_flags(val, dest.llval, align, flags);
+                let align = dest.val.align;
+                bx.store_with_flags(val, dest.val.llval, align, flags);

-                let llptr = bx.inbounds_ptradd(dest.llval, bx.const_usize(b_offset.bytes()));
+                let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                 let val = bx.from_immediate(b);
-                let align = dest.align.restrict_for_offset(b_offset);
+                let align = dest.val.align.restrict_for_offset(b_offset);
                 bx.store_with_flags(val, llptr, align, flags);
             }
         }
@@ -12,25 +12,48 @@ use rustc_middle::ty::{self, Ty};
 use rustc_target::abi::{Align, FieldsShape, Int, Pointer, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};

+/// The location and extra runtime properties of the place.
+///
+/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
 #[derive(Copy, Clone, Debug)]
-pub struct PlaceRef<'tcx, V> {
+pub struct PlaceValue<V> {
     /// A pointer to the contents of the place.
     pub llval: V,

     /// This place's extra data if it is unsized, or `None` if null.
     pub llextra: Option<V>,

-    /// The monomorphized type of this place, including variant information.
-    pub layout: TyAndLayout<'tcx>,
-
     /// The alignment we know for this place.
     pub align: Align,
 }

+impl<V: CodegenObject> PlaceValue<V> {
+    /// Constructor for the ordinary case of `Sized` types.
+    ///
+    /// Sets `llextra` to `None`.
+    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
+        PlaceValue { llval, llextra: None, align }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+    /// The location and extra runtime properties of the place.
+    pub val: PlaceValue<V>,
+
+    /// The monomorphized type of this place, including variant information.
+    ///
+    /// You probably shouldn't use the alignment from this layout;
+    /// rather you should use the `.val.align` of the actual place,
+    /// which might be different from the type's normal alignment.
+    pub layout: TyAndLayout<'tcx>,
+}
+
 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
         assert!(layout.is_sized());
-        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+        let val = PlaceValue::new_sized(llval, layout.align.abi);
+        PlaceRef { val, layout }
     }

     pub fn new_sized_aligned(
@@ -39,7 +62,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         align: Align,
     ) -> PlaceRef<'tcx, V> {
         assert!(layout.is_sized());
-        PlaceRef { llval, llextra: None, layout, align }
+        let val = PlaceValue::new_sized(llval, align);
+        PlaceRef { val, layout }
     }

     // FIXME(eddyb) pass something else for the name so no work is done
@@ -78,7 +102,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         if let FieldsShape::Array { count, .. } = self.layout.fields {
             if self.layout.is_unsized() {
                 assert_eq!(count, 0);
-                self.llextra.unwrap()
+                self.val.llextra.unwrap()
             } else {
                 cx.const_usize(count)
             }
@@ -97,21 +121,27 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let field = self.layout.field(bx.cx(), ix);
         let offset = self.layout.fields.offset(ix);
-        let effective_field_align = self.align.restrict_for_offset(offset);
+        let effective_field_align = self.val.align.restrict_for_offset(offset);

         // `simple` is called when we don't need to adjust the offset to
         // the dynamic alignment of the field.
         let mut simple = || {
             let llval = if offset.bytes() == 0 {
-                self.llval
+                self.val.llval
             } else {
-                bx.inbounds_ptradd(self.llval, bx.const_usize(offset.bytes()))
+                bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
             };
             PlaceRef {
-                llval,
-                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+                val: PlaceValue {
+                    llval,
+                    llextra: if bx.cx().type_has_metadata(field.ty) {
+                        self.val.llextra
+                    } else {
+                        None
+                    },
+                    align: effective_field_align,
+                },
                 layout: field,
-                align: effective_field_align,
             }
         };

@@ -142,7 +172,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
         // the `y` field has 16-bit alignment.

-        let meta = self.llextra;
+        let meta = self.val.llextra;

         let unaligned_offset = bx.cx().const_usize(offset.bytes());

@@ -164,9 +194,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         debug!("struct_field_ptr: DST field offset: {:?}", offset);

         // Adjust pointer.
-        let ptr = bx.inbounds_ptradd(self.llval, offset);
-        PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
+        let ptr = bx.inbounds_ptradd(self.val.llval, offset);
+        let val =
+            PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
+        PlaceRef { val, layout: field }
     }

     /// Obtain the actual discriminant of a value.
@@ -314,8 +345,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                     self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                 bx.store(
                     bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
-                    ptr.llval,
-                    ptr.align,
+                    ptr.val.llval,
+                    ptr.val.align,
                 );
             }
             Variants::Multiple {
@@ -357,14 +388,16 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         };

         PlaceRef {
-            llval: bx.inbounds_gep(
-                bx.cx().backend_type(self.layout),
-                self.llval,
-                &[bx.cx().const_usize(0), llindex],
-            ),
-            llextra: None,
+            val: PlaceValue {
+                llval: bx.inbounds_gep(
+                    bx.cx().backend_type(self.layout),
+                    self.val.llval,
+                    &[bx.cx().const_usize(0), llindex],
+                ),
+                llextra: None,
+                align: self.val.align.restrict_for_offset(offset),
+            },
             layout,
-            align: self.align.restrict_for_offset(offset),
         }
     }

@@ -389,11 +422,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     }

     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-        bx.lifetime_start(self.llval, self.layout.size);
+        bx.lifetime_start(self.val.llval, self.layout.size);
     }

     pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-        bx.lifetime_end(self.llval, self.layout.size);
+        bx.lifetime_end(self.val.llval, self.layout.size);
     }
 }

@@ -461,8 +494,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

                 if subslice.layout.is_unsized() {
                     assert!(from_end, "slice subslices should be `from_end`");
-                    subslice.llextra =
-                        Some(bx.sub(cg_base.llextra.unwrap(), bx.cx().const_usize(from + to)));
+                    subslice.val.llextra = Some(
+                        bx.sub(cg_base.val.llextra.unwrap(), bx.cx().const_usize(from + to)),
+                    );
                 }

                 subslice
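Several hunks above restrict a place's alignment when projecting to an offset: the projected pointer is only guaranteed to be as aligned as the smaller of the original alignment and the largest power of two dividing the offset. A self-contained sketch of that rule, with plain `u64`s standing in for `Align` and a hypothetical free-function name:

    // What `align.restrict_for_offset(offset)` guarantees: `ptr + offset`
    // is aligned to min(align, largest power of two dividing offset).
    fn restrict_for_offset(align: u64, offset: u64) -> u64 {
        if offset == 0 {
            align // offset 0 preserves the full alignment
        } else {
            align.min(1u64 << offset.trailing_zeros())
        }
    }

    fn main() {
        assert_eq!(restrict_for_offset(8, 4), 4);  // 8-aligned base + 4 => 4-aligned
        assert_eq!(restrict_for_offset(8, 16), 8); // 16 is a multiple of 8
        assert_eq!(restrict_for_offset(8, 6), 2);  // 6 = 2 * 3 => only 2-aligned
    }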
@@ -95,20 +95,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }

         if let OperandValue::Immediate(v) = cg_elem.val {
-            let start = dest.llval;
+            let start = dest.val.llval;
             let size = bx.const_usize(dest.layout.size.bytes());

             // Use llvm.memset.p0i8.* to initialize all zero arrays
             if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                 let fill = bx.cx().const_u8(0);
-                bx.memset(start, fill, size, dest.align, MemFlags::empty());
+                bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                 return;
             }

             // Use llvm.memset.p0i8.* to initialize byte arrays
             let v = bx.from_immediate(v);
             if bx.cx().val_ty(v) == bx.cx().type_i8() {
-                bx.memset(start, v, size, dest.align, MemFlags::empty());
+                bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                 return;
             }
         }
@@ -182,7 +182,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                 // When we have immediate(s), the alignment of the source is irrelevant,
                 // so we can store them using the destination's alignment.
-                src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
+                src.val.store(
+                    bx,
+                    PlaceRef::new_sized_aligned(dst.val.llval, src.layout, dst.val.align),
+                );
             }
         }
     }
@@ -375,7 +378,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) {
         debug!(
             "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
-            indirect_dest.llval, rvalue
+            indirect_dest.val.llval, rvalue
         );

         match *rvalue {
@@ -765,9 +768,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Note: places are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
                 let val = if !bx.cx().type_has_metadata(ty) {
-                    OperandValue::Immediate(cg_place.llval)
+                    OperandValue::Immediate(cg_place.val.llval)
                 } else {
-                    OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+                    OperandValue::Pair(cg_place.val.llval, cg_place.val.llextra.unwrap())
                 };
                 OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
             }
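The repeat-initialization hunk above (`@@ -95`) picks a fill strategy before falling back to the element-by-element loop: an all-zero immediate or a single-byte element can be written with one memset. A self-contained sketch of that decision, with the element given as raw bytes for illustration:

    // Choose how to initialize `[elem; N]`, mirroring the two memset
    // fast paths in the hunk above.
    fn fill_strategy(elem_bytes: &[u8]) -> &'static str {
        if elem_bytes.iter().all(|&b| b == 0) {
            "memset with 0" // llvm.memset covers any all-zero element
        } else if elem_bytes.len() == 1 {
            "memset with the byte" // single-byte elements also memset
        } else {
            "loop over elements" // general case: the repeat loop
        }
    }

    fn main() {
        assert_eq!(fill_strategy(&[0, 0, 0, 0]), "memset with 0");
        assert_eq!(fill_strategy(&[7]), "memset with the byte");
        assert_eq!(fill_strategy(&[1, 2]), "loop over elements");
    }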
@@ -290,14 +290,14 @@ pub trait BuilderMethods<'a, 'tcx>:
         src: PlaceRef<'tcx, Self::Value>,
         flags: MemFlags,
     ) {
-        debug_assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
-        debug_assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
+        debug_assert!(src.val.llextra.is_none(), "cannot directly copy from unsized values");
+        debug_assert!(dst.val.llextra.is_none(), "cannot directly copy into unsized values");
         debug_assert_eq!(dst.layout.size, src.layout.size);
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
             let ty = self.backend_type(dst.layout);
-            let val = self.load(ty, src.llval, src.align);
-            self.store_with_flags(val, dst.llval, dst.align, flags);
+            let val = self.load(ty, src.val.llval, src.val.align);
+            self.store_with_flags(val, dst.val.llval, dst.val.align, flags);
         } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(dst.layout)
         {
             // If we're not optimizing, the aliasing information from `memcpy`
@@ -306,7 +306,7 @@ pub trait BuilderMethods<'a, 'tcx>:
             temp.val.store_with_flags(self, dst, flags);
         } else if !dst.layout.is_zst() {
             let bytes = self.const_usize(dst.layout.size.bytes());
-            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
+            self.memcpy(dst.val.llval, dst.val.align, src.val.llval, src.val.align, bytes, flags);
         }
     }
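`typed_place_copy_with_flags` above chooses among three lowering strategies. A self-contained sketch of that dispatch, with simplified booleans standing in for the real `MemFlags` and optimization-level checks:

    #[derive(Debug, PartialEq)]
    enum CopyStrategy {
        LoadStore, // load the whole value, then store it
        Memcpy,    // plain memcpy of the byte size
        Nothing,   // zero-sized type: emit no code at all
    }

    // Mirrors the branch order above: nontemporal copies and immediates
    // at -O0 use load+store; everything else memcpys unless it is a ZST.
    fn choose(nontemporal: bool, unoptimized_immediate: bool, is_zst: bool) -> CopyStrategy {
        if nontemporal {
            CopyStrategy::LoadStore // there is no nontemporal memcpy
        } else if unoptimized_immediate {
            CopyStrategy::LoadStore // the -O0 immediate path; see the (truncated) comment above
        } else if !is_zst {
            CopyStrategy::Memcpy
        } else {
            CopyStrategy::Nothing
        }
    }

    fn main() {
        assert_eq!(choose(true, false, false), CopyStrategy::LoadStore);
        assert_eq!(choose(false, false, false), CopyStrategy::Memcpy);
        assert_eq!(choose(false, false, true), CopyStrategy::Nothing);
    }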