Rollup merge of #123775 - scottmcm:place-val, r=cjgillot

Make `PlaceRef` and `OperandValue::Ref` share a common `PlaceValue` type

Both `PlaceRef` and `OperandValue::Ref` need the same triple: the backend pointer immediate, the optional backend metadata for DSTs, and the actual alignment of the place (which can differ from the type's ABI alignment).

This PR introduces a new `PlaceValue` type for those three values, leaving [`PlaceRef`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_ssa/mir/place/struct.PlaceRef.html) with the `TyAndLayout` and a `PlaceValue`, just like how [`OperandRef`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_ssa/mir/operand/struct.OperandRef.html) is a `TyAndLayout` and an `OperandValue`.
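
Concretely, the new type (added to `rustc_codegen_ssa::mir::place` by this PR, quoted here from the `place.rs` diff below) is exactly those three values, plus a constructor for the common `Sized` case:

```rust
/// The location and extra runtime properties of the place.
///
/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
#[derive(Copy, Clone, Debug)]
pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<V: CodegenObject> PlaceValue<V> {
    /// Constructor for the ordinary case of `Sized` types.
    ///
    /// Sets `llextra` to `None`.
    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
        PlaceValue { llval, llextra: None, align }
    }
}
```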

This means that various places that use `Ref`s as places can just pass the `PlaceValue` along, as in this excerpt from the diff:
```diff
        match operand.val {
-            OperandValue::Ref(ptr, meta, align) => {
-                debug_assert_eq!(meta, None);
+            OperandValue::Ref(source_place_val) => {
+                debug_assert_eq!(source_place_val.llextra, None);
                debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-                let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
+                let fake_place = PlaceRef { val: source_place_val, layout: cast };
                Some(bx.load_operand(fake_place).val)
            }
```
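
Bundling the pointer and alignment together also lets `BuilderMethods` grow provided `load_from_place`/`store_to_place` helpers (quoted from the `traits/builder.rs` diff at the bottom of this page), so callers that used to unpack the triple just to call `load`/`store` can hand over the whole `PlaceValue`:

```rust
fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
    debug_assert_eq!(place.llextra, None);
    self.load(ty, place.llval, place.align)
}

fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
    debug_assert_eq!(place.llextra, None);
    self.store(val, place.llval, place.align)
}
```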

There's more refactoring I'd like to do after this, but I wanted to stop the PR here, where it's hopefully easy (albeit probably not quick) to review, since I tried to keep every change clear line by line. (Most changes just add `.val` to reach a field.)

You can also review commit-by-commit if you'd like. Each commit passed tidy and the codegen tests on my machine (though I didn't run the cg_gcc ones).
Merged by Matthias Krüger on 2024-04-12 04:38:21 +02:00 (committed via GitHub), commit f4f644182b.
14 changed files with 239 additions and 169 deletions


```diff
@@ -974,7 +974,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         &mut self,
         place: PlaceRef<'tcx, RValue<'gcc>>,
     ) -> OperandRef<'tcx, RValue<'gcc>> {
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -999,10 +999,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             }
         }
-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(_) = place.val.llextra {
+            // FIXME: Merge with the `else` below?
+            OperandValue::Ref(place.val)
         } else if place.layout.is_gcc_immediate() {
-            let load = self.load(place.layout.gcc_type(self), place.llval, place.align);
+            let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                 scalar_load_metadata(self, load, scalar);
             }
@@ -1012,9 +1013,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
            let mut load = |i, scalar: &abi::Scalar, align| {
                let llptr = if i == 0 {
-                   place.llval
+                   place.val.llval
                } else {
-                   self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                   self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                };
                let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                let load = self.load(llty, llptr, align);
@@ -1027,11 +1028,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
            };
            OperandValue::Pair(
-               load(0, a, place.align),
-               load(1, b, place.align.restrict_for_offset(b_offset)),
+               load(0, a, place.val.align),
+               load(1, b, place.val.align.restrict_for_offset(b_offset)),
            )
        } else {
-           OperandValue::Ref(place.llval, None, place.align)
+           OperandValue::Ref(place.val)
        };
        OperandRef { val, layout: place.layout }
@@ -1045,8 +1046,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
    ) {
        let zero = self.const_usize(0);
        let count = self.const_usize(count);
-       let start = dest.project_index(self, zero).llval;
-       let end = dest.project_index(self, count).llval;
+       let start = dest.project_index(self, zero).val.llval;
+       let end = dest.project_index(self, count).val.llval;
        let header_bb = self.append_sibling_block("repeat_loop_header");
        let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -1064,7 +1065,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
        self.cond_br(keep_going, body_bb, next_bb);
        self.switch_to_block(body_bb);
-       let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+       let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
        cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
        let next = self.inbounds_gep(
```


```diff
@@ -11,7 +11,7 @@ use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::common::IntPredicate;
 use rustc_codegen_ssa::errors::InvalidMonomorphization;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
-use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
 use rustc_codegen_ssa::traits::{
     ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods,
 };
@@ -354,7 +354,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                let block = self.llbb();
                let extended_asm = block.add_extended_asm(None, "");
-               extended_asm.add_input_operand(None, "r", result.llval);
+               extended_asm.add_input_operand(None, "r", result.val.llval);
                extended_asm.add_clobber("memory");
                extended_asm.set_volatile_flag(true);
@@ -388,8 +388,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
-               let ptr = self.pointercast(result.llval, ptr_llty);
-               self.store(llval, ptr, result.align);
+               let ptr = self.pointercast(result.val.llval, ptr_llty);
+               self.store(llval, ptr, result.val.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
@@ -502,7 +502,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
            return;
        }
        if self.is_sized_indirect() {
-           OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+           OperandValue::Ref(PlaceValue::new_sized(val, self.layout.align.abi)).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
        } else if let PassMode::Cast { ref cast, .. } = self.mode {
@@ -511,7 +511,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
            let can_store_through_cast_ptr = false;
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
-               let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+               let cast_dst = bx.pointercast(dst.val.llval, cast_ptr_llty);
                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
@@ -539,7 +539,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                // ... and then memcpy it to the intended destination.
                bx.memcpy(
-                   dst.llval,
+                   dst.val.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
@@ -571,7 +571,12 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { meta_attrs: Some(_), .. } => {
-               OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+               let place_val = PlaceValue {
+                   llval: next(),
+                   llextra: Some(next()),
+                   align: self.layout.align.abi,
+               };
+               OperandValue::Ref(place_val).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { meta_attrs: None, .. }
```


```diff
@@ -82,7 +82,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                let place = PlaceRef::alloca(bx, args[0].layout);
                args[0].val.store(bx, place);
                let int_ty = bx.type_ix(expected_bytes * 8);
-               let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+               let ptr = bx.pointercast(place.val.llval, bx.cx.type_ptr_to(int_ty));
                bx.load(int_ty, ptr, Align::ONE)
            }
            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
```


```diff
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
-use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_middle::bug;
@@ -207,7 +207,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
            // Sized indirect arguments
            PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
                let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
-               OperandValue::Ref(val, None, align).store(bx, dst);
+               OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst);
            }
            // Unsized indirect arguments
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
@@ -233,7 +233,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                bx.store(val, llscratch, scratch_align);
                // ... and then memcpy it to the intended destination.
                bx.memcpy(
-                   dst.llval,
+                   dst.val.llval,
                    self.layout.align.abi,
                    llscratch,
                    scratch_align,
@@ -265,7 +265,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
-               OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+               let place_val = PlaceValue {
+                   llval: next(),
+                   llextra: Some(next()),
+                   align: self.layout.align.abi,
+               };
+               OperandValue::Ref(place_val).store(bx, dst);
            }
            PassMode::Direct(_)
            | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
```


```diff
@@ -535,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                panic!("unsized locals must not be `extern` types");
            }
        }
-       assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+       assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
        if place.layout.is_zst() {
            return OperandRef::zero_sized(place.layout);
@@ -579,13 +579,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            }
        }
-       let val = if let Some(llextra) = place.llextra {
-           OperandValue::Ref(place.llval, Some(llextra), place.align)
+       let val = if let Some(_) = place.val.llextra {
+           // FIXME: Merge with the `else` below?
+           OperandValue::Ref(place.val)
        } else if place.layout.is_llvm_immediate() {
            let mut const_llval = None;
            let llty = place.layout.llvm_type(self);
            unsafe {
-               if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+               if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        if let Some(init) = llvm::LLVMGetInitializer(global) {
                            if self.val_ty(init) == llty {
@@ -596,7 +597,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
-               let load = self.load(llty, place.llval, place.align);
+               let load = self.load(llty, place.val.llval, place.val.align);
                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                    scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                }
@@ -608,9 +609,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                let llptr = if i == 0 {
-                   place.llval
+                   place.val.llval
                } else {
-                   self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                   self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                };
                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                let load = self.load(llty, llptr, align);
@@ -619,11 +620,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            };
            OperandValue::Pair(
-               load(0, a, place.layout, place.align, Size::ZERO),
-               load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+               load(0, a, place.layout, place.val.align, Size::ZERO),
+               load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
            )
        } else {
-           OperandValue::Ref(place.llval, None, place.align)
+           OperandValue::Ref(place.val)
        };
        OperandRef { val, layout: place.layout }
```


```diff
@@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                    llvm::LLVMSetAlignment(load, align);
                }
                if !result.layout.is_zst() {
-                   self.store(load, result.llval, result.align);
+                   self.store_to_place(load, result.val);
                }
                return Ok(());
            }
@@ -428,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
            sym::black_box => {
                args[0].val.store(self, result);
-               let result_val_span = [result.llval];
+               let result_val_span = [result.val.llval];
                // We need to "use" the argument in some way LLVM can't introspect, and on
                // targets that support it we can typically leverage inline assembly to do
                // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -482,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-               self.store(llval, result.llval, result.align);
+               self.store(llval, result.val.llval, result.val.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
@@ -1065,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                let place = PlaceRef::alloca(bx, args[0].layout);
                args[0].val.store(bx, place);
                let int_ty = bx.type_ix(expected_bytes * 8);
-               bx.load(int_ty, place.llval, Align::ONE)
+               bx.load(int_ty, place.val.llval, Align::ONE)
            }
            _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                span,
```


```diff
@@ -1,6 +1,6 @@
 use super::operand::OperandRef;
 use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized};
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{CachedLlbb, FunctionCx, LocalRef};
 use crate::base;
@@ -242,7 +242,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                bx.switch_to_block(fx.llbb(target));
                fx.set_debug_loc(bx, self.terminator.source_info);
                for tmp in copied_constant_arguments {
-                   bx.lifetime_end(tmp.llval, tmp.layout.size);
+                   bx.lifetime_end(tmp.val.llval, tmp.layout.size);
                }
                fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
            }
@@ -256,7 +256,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
            if let Some((ret_dest, target)) = destination {
                for tmp in copied_constant_arguments {
-                   bx.lifetime_end(tmp.llval, tmp.layout.size);
+                   bx.lifetime_end(tmp.val.llval, tmp.layout.size);
                }
                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                self.funclet_br(fx, bx, target, mergeable_succ)
@@ -431,7 +431,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            let va_list_arg_idx = self.fn_abi.args.len();
            match self.locals[mir::Local::from_usize(1 + va_list_arg_idx)] {
                LocalRef::Place(va_list) => {
-                   bx.va_end(va_list.llval);
+                   bx.va_end(va_list.val.llval);
                }
                _ => bug!("C-variadic function must have a `VaList` place"),
            }
@@ -455,8 +455,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            PassMode::Direct(_) | PassMode::Pair(..) => {
                let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
-               if let Ref(llval, _, align) = op.val {
-                   bx.load(bx.backend_type(op.layout), llval, align)
+               if let Ref(place_val) = op.val {
+                   bx.load_from_place(bx.backend_type(op.layout), place_val)
                } else {
                    op.immediate_or_packed_pair(bx)
                }
@@ -466,21 +466,23 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            let op = match self.locals[mir::RETURN_PLACE] {
                LocalRef::Operand(op) => op,
                LocalRef::PendingOperand => bug!("use of return before def"),
-               LocalRef::Place(cg_place) => OperandRef {
-                   val: Ref(cg_place.llval, None, cg_place.align),
-                   layout: cg_place.layout,
-               },
+               LocalRef::Place(cg_place) => {
+                   OperandRef { val: Ref(cg_place.val), layout: cg_place.layout }
+               }
                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
            };
            let llslot = match op.val {
                Immediate(_) | Pair(..) => {
                    let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
                    op.val.store(bx, scratch);
-                   scratch.llval
+                   scratch.val.llval
                }
-               Ref(llval, _, align) => {
-                   assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
-                   llval
+               Ref(place_val) => {
+                   assert_eq!(
+                       place_val.align, op.layout.align.abi,
+                       "return place is unaligned!"
+                   );
+                   place_val.llval
                }
                ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
            };
@@ -512,11 +514,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        let place = self.codegen_place(bx, location.as_ref());
        let (args1, args2);
-       let mut args = if let Some(llextra) = place.llextra {
-           args2 = [place.llval, llextra];
+       let mut args = if let Some(llextra) = place.val.llextra {
+           args2 = [place.val.llval, llextra];
            &args2[..]
        } else {
-           args1 = [place.llval];
+           args1 = [place.val.llval];
            &args1[..]
        };
        let (drop_fn, fn_abi, drop_instance) =
@@ -918,7 +920,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            let dest = match ret_dest {
                _ if fn_abi.ret.is_indirect() => llargs[0],
                ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
-               ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+               ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.val.llval,
                ReturnDest::DirectOperand(_) => {
                    bug!("Cannot use direct operand with an intrinsic call")
                }
@@ -951,7 +953,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
                Ok(()) => {
                    if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                       self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                       self.store_return(bx, ret_dest, &fn_abi.ret, dst.val.llval);
                    }
                    return if let Some(target) = target {
@@ -1032,7 +1034,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        llargs.push(data_ptr);
                        continue 'make_args;
                    }
-                   Ref(data_ptr, Some(meta), _) => {
+                   Ref(PlaceValue { llval: data_ptr, llextra: Some(meta), .. }) => {
                        // by-value dynamic dispatch
                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                            bx,
@@ -1058,16 +1060,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                    }
                    let place = op.deref(bx.cx());
-                   let data_ptr = place.project_field(bx, 0);
-                   let meta_ptr = place.project_field(bx, 1);
-                   let meta = bx.load_operand(meta_ptr);
+                   let data_place = place.project_field(bx, 0);
+                   let meta_place = place.project_field(bx, 1);
+                   let meta = bx.load_operand(meta_place);
                    llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                        bx,
                        meta.immediate(),
                        op.layout.ty,
                        fn_abi,
                    ));
-                   llargs.push(data_ptr.llval);
+                   llargs.push(data_place.val.llval);
                    continue;
                }
                _ => {
@@ -1079,12 +1081,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            // The callee needs to own the argument memory if we pass it
            // by-ref, so make a local copy of non-immediate constants.
            match (&arg.node, op.val) {
-               (&mir::Operand::Copy(_), Ref(_, None, _))
-               | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+               (&mir::Operand::Copy(_), Ref(PlaceValue { llextra: None, .. }))
+               | (&mir::Operand::Constant(_), Ref(PlaceValue { llextra: None, .. })) => {
                    let tmp = PlaceRef::alloca(bx, op.layout);
-                   bx.lifetime_start(tmp.llval, tmp.layout.size);
+                   bx.lifetime_start(tmp.val.llval, tmp.layout.size);
                    op.val.store(bx, tmp);
-                   op.val = Ref(tmp.llval, None, tmp.align);
+                   op.val = Ref(tmp.val);
                    copied_constant_arguments.push(tmp);
                }
                _ => {}
@@ -1428,7 +1430,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
            },
            PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => match op.val {
-               Ref(a, Some(b), _) => {
+               Ref(PlaceValue { llval: a, llextra: Some(b), .. }) => {
                    llargs.push(a);
                    llargs.push(b);
                    return;
@@ -1450,34 +1452,34 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    };
                    let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
                    op.val.store(bx, scratch);
-                   (scratch.llval, scratch.align, true)
+                   (scratch.val.llval, scratch.val.align, true)
                }
                PassMode::Cast { .. } => {
                    let scratch = PlaceRef::alloca(bx, arg.layout);
                    op.val.store(bx, scratch);
-                   (scratch.llval, scratch.align, true)
+                   (scratch.val.llval, scratch.val.align, true)
                }
                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
            },
-           Ref(llval, llextra, align) => match arg.mode {
+           Ref(op_place_val) => match arg.mode {
                PassMode::Indirect { attrs, .. } => {
                    let required_align = match attrs.pointee_align {
                        Some(pointee_align) => cmp::max(pointee_align, arg.layout.align.abi),
                        None => arg.layout.align.abi,
                    };
-                   if align < required_align {
+                   if op_place_val.align < required_align {
                        // For `foo(packed.large_field)`, and types with <4 byte alignment on x86,
                        // alignment requirements may be higher than the type's alignment, so copy
                        // to a higher-aligned alloca.
                        let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
-                       let op_place = PlaceRef { llval, llextra, layout: op.layout, align };
+                       let op_place = PlaceRef { val: op_place_val, layout: op.layout };
                        bx.typed_place_copy(scratch, op_place);
-                       (scratch.llval, scratch.align, true)
+                       (scratch.val.llval, scratch.val.align, true)
                    } else {
-                       (llval, align, true)
+                       (op_place_val.llval, op_place_val.align, true)
                    }
                }
-               _ => (llval, align, true),
+               _ => (op_place_val.llval, op_place_val.align, true),
            },
            ZeroSized => match arg.mode {
                PassMode::Indirect { on_stack, .. } => {
@@ -1490,7 +1492,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    // a pointer for `repr(C)` structs even when empty, so get
                    // one from an `alloca` (which can be left uninitialized).
                    let scratch = PlaceRef::alloca(bx, arg.layout);
-                   (scratch.llval, scratch.align, true)
+                   (scratch.val.llval, scratch.val.align, true)
                }
                _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"),
            },
@@ -1557,15 +1559,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        let tuple = self.codegen_operand(bx, operand);
        // Handle both by-ref and immediate tuples.
-       if let Ref(llval, None, align) = tuple.val {
-           let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+       if let Ref(place_val) = tuple.val {
+           if place_val.llextra.is_some() {
+               bug!("closure arguments must be sized");
+           }
+           let tuple_ptr = PlaceRef { val: place_val, layout: tuple.layout };
            for i in 0..tuple.layout.fields.count() {
                let field_ptr = tuple_ptr.project_field(bx, i);
                let field = bx.load_operand(field_ptr);
                self.codegen_argument(bx, field, llargs, &args[i]);
            }
-       } else if let Ref(_, Some(_), _) = tuple.val {
-           bug!("closure arguments must be sized")
        } else {
            // If the tuple is immediate, the elements are as well.
            for i in 0..tuple.layout.fields.count() {
@@ -1782,7 +1785,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            // but the calling convention has an indirect return.
            let tmp = PlaceRef::alloca(bx, fn_ret.layout);
            tmp.storage_live(bx);
-           llargs.push(tmp.llval);
+           llargs.push(tmp.val.llval);
            ReturnDest::IndirectOperand(tmp, index)
        } else if intrinsic.is_some() {
            // Currently, intrinsics always need a location to store
@@ -1803,7 +1806,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            self.codegen_place(bx, mir::PlaceRef { local: dest.local, projection: dest.projection })
        };
        if fn_ret.is_indirect() {
-           if dest.align < dest.layout.align.abi {
+           if dest.val.align < dest.layout.align.abi {
                // Currently, MIR code generation does not create calls
                // that store directly to fields of packed structs (in
                // fact, the calls it creates write only to temps).
@@ -1812,7 +1815,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                // to create a temporary.
                span_bug!(self.mir.span, "can't directly store to unaligned value");
            }
-           llargs.push(dest.llval);
+           llargs.push(dest.val.llval);
            ReturnDest::Nothing
        } else {
            ReturnDest::Store(dest)
```


```diff
@@ -14,7 +14,7 @@ use rustc_span::{BytePos, Span};
 use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};
 use super::operand::{OperandRef, OperandValue};
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{FunctionCx, LocalRef};
 use std::ops::Range;
@@ -252,7 +252,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        // at least for the cases which LLVM handles correctly.
        let spill_slot = PlaceRef::alloca(bx, operand.layout);
        if let Some(name) = name {
-           bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+           bx.set_var_name(spill_slot.val.llval, &(name + ".dbg.spill"));
        }
        operand.val.store(bx, spill_slot);
        spill_slot
@@ -331,10 +331,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        if let Some(name) = &name {
            match local_ref {
                LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
-                   bx.set_var_name(place.llval, name);
+                   bx.set_var_name(place.val.llval, name);
                }
                LocalRef::Operand(operand) => match operand.val {
-                   OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
+                   OperandValue::Ref(PlaceValue { llval: x, .. }) | OperandValue::Immediate(x) => {
                        bx.set_var_name(x, name);
                    }
                    OperandValue::Pair(a, b) => {
@@ -417,16 +417,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            let ptr_ty = Ty::new_mut_ptr(bx.tcx(), place.layout.ty);
            let ptr_layout = bx.layout_of(ptr_ty);
            let alloca = PlaceRef::alloca(bx, ptr_layout);
-           bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+           bx.set_var_name(alloca.val.llval, &(var.name.to_string() + ".dbg.spill"));
            // Write the pointer to the variable
-           bx.store(place.llval, alloca.llval, alloca.align);
+           bx.store_to_place(place.val.llval, alloca.val);
            // Point the debug info to `*alloca` for the current variable
            bx.dbg_var_addr(
                dbg_var,
                dbg_loc,
-               alloca.llval,
+               alloca.val.llval,
                Size::ZERO,
                &[Size::ZERO],
                var.fragment,
@@ -435,7 +435,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            bx.dbg_var_addr(
                dbg_var,
                dbg_loc,
-               base.llval,
+               base.val.llval,
                direct_offset,
                &indirect_offsets,
                var.fragment,
@@ -553,7 +553,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                let base =
                    Self::spill_operand_to_stack(operand, Some(var.name.to_string()), bx);
-               bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], fragment);
+               bx.dbg_var_addr(
+                   dbg_var,
+                   dbg_loc,
+                   base.val.llval,
+                   Size::ZERO,
+                   &[],
+                   fragment,
+               );
            }
        }
    }
```


```diff
@@ -387,9 +387,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    let success = bx.from_immediate(success);
                    let dest = result.project_field(bx, 0);
-                   bx.store(val, dest.llval, dest.align);
+                   bx.store_to_place(val, dest.val);
                    let dest = result.project_field(bx, 1);
-                   bx.store(success, dest.llval, dest.align);
+                   bx.store_to_place(success, dest.val);
                } else {
                    invalid_monomorphization(ty);
                }
@@ -511,7 +511,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-               bx.store(llval, result.llval, result.align);
+               bx.store_to_place(llval, result.val);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
```


```diff
@@ -336,7 +336,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
-               bx.va_start(va_list.llval);
+               bx.va_start(va_list.val.llval);
                return LocalRef::Place(va_list);
            }
```


```diff
@@ -1,4 +1,4 @@
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{FunctionCx, LocalRef};
 use crate::size_of_val;
@@ -23,11 +23,14 @@ pub enum OperandValue<V> {
    /// The second value, if any, is the extra data (vtable or length)
    /// which indicates that it refers to an unsized rvalue.
    ///
-   /// An `OperandValue` has this variant for types which are neither
-   /// `Immediate` nor `Pair`s. The backend value in this variant must be a
-   /// pointer to the *non*-immediate backend type. That pointee type is the
+   /// An `OperandValue` *must* be this variant for any type for which
+   /// [`LayoutTypeMethods::is_backend_ref`] returns `true`.
+   /// (That basically amounts to "isn't one of the other variants".)
+   ///
+   /// This holds a [`PlaceValue`] (like a [`PlaceRef`] does) with a pointer
+   /// to the location holding the value. The type behind that pointer is the
    /// one returned by [`LayoutTypeMethods::backend_type`].
-   Ref(V, Option<V>, Align),
+   Ref(PlaceValue<V>),
    /// A single LLVM immediate value.
    ///
    /// An `OperandValue` *must* be this variant for any type for which
@@ -221,7 +224,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
            OperandValue::ZeroSized => bug!("Deref of ZST operand {:?}", self),
        };
        let layout = cx.layout_of(projected_ty);
-       PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+       let val = PlaceValue { llval: llptr, llextra, align: layout.align.abi };
+       PlaceRef { val, layout }
    }
    /// If this operand is a `Pair`, we return an aggregate with the two values.
@@ -361,7 +365,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
            OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
        } else {
            let ptr = bx.cx().type_ptr();
-           OperandValue::Ref(bx.const_poison(ptr), None, layout.align.abi)
+           OperandValue::Ref(PlaceValue::new_sized(bx.const_poison(ptr), layout.align.abi))
        }
    }
@@ -409,18 +413,17 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
                // value is through `undef`/`poison`, and the store itself is useless.
            }
-           OperandValue::Ref(llval, llextra @ None, source_align) => {
+           OperandValue::Ref(val) => {
                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
-               let source_place =
-                   PlaceRef { llval, llextra, align: source_align, layout: dest.layout };
+               if val.llextra.is_some() {
+                   bug!("cannot directly store unsized values");
+               }
+               let source_place = PlaceRef { val, layout: dest.layout };
                bx.typed_place_copy_with_flags(dest, source_place, flags);
            }
-           OperandValue::Ref(_, Some(_), _) => {
-               bug!("cannot directly store unsized values");
-           }
            OperandValue::Immediate(s) => {
                let val = bx.from_immediate(s);
-               bx.store_with_flags(val, dest.llval, dest.align, flags);
+               bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
            }
            OperandValue::Pair(a, b) => {
                let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
@@ -429,12 +432,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
                let val = bx.from_immediate(a);
-               let align = dest.align;
-               bx.store_with_flags(val, dest.llval, align, flags);
+               let align = dest.val.align;
+               bx.store_with_flags(val, dest.val.llval, align, flags);
-               let llptr = bx.inbounds_ptradd(dest.llval, bx.const_usize(b_offset.bytes()));
+               let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                let val = bx.from_immediate(b);
-               let align = dest.align.restrict_for_offset(b_offset);
+               let align = dest.val.align.restrict_for_offset(b_offset);
                bx.store_with_flags(val, llptr, align, flags);
            }
        }
@@ -454,7 +457,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
            .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest))
            .ty;
-       let OperandValue::Ref(llptr, Some(llextra), _) = self else {
+       let OperandValue::Ref(PlaceValue { llval: llptr, llextra: Some(llextra), .. }) = self
+       else {
            bug!("store_unsized called with a sized value (or with an extern type)")
        };
```


```diff
@@ -12,25 +12,48 @@ use rustc_middle::ty::{self, Ty};
 use rustc_target::abi::{Align, FieldsShape, Int, Pointer, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};
+/// The location and extra runtime properties of the place.
+///
+/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
 #[derive(Copy, Clone, Debug)]
-pub struct PlaceRef<'tcx, V> {
+pub struct PlaceValue<V> {
    /// A pointer to the contents of the place.
    pub llval: V,
    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,
-   /// The monomorphized type of this place, including variant information.
-   pub layout: TyAndLayout<'tcx>,
    /// The alignment we know for this place.
    pub align: Align,
 }
+impl<V: CodegenObject> PlaceValue<V> {
+   /// Constructor for the ordinary case of `Sized` types.
+   ///
+   /// Sets `llextra` to `None`.
+   pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
+       PlaceValue { llval, llextra: None, align }
+   }
+}
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+   /// The location and extra runtime properties of the place.
+   pub val: PlaceValue<V>,
+   /// The monomorphized type of this place, including variant information.
+   ///
+   /// You probably shouldn't use the alignment from this layout;
+   /// rather you should use the `.val.align` of the actual place,
+   /// which might be different from the type's normal alignment.
+   pub layout: TyAndLayout<'tcx>,
+}
 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
-       PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+       let val = PlaceValue::new_sized(llval, layout.align.abi);
+       PlaceRef { val, layout }
    }
    pub fn new_sized_aligned(
@@ -39,7 +62,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
-       PlaceRef { llval, llextra: None, layout, align }
+       let val = PlaceValue::new_sized(llval, align);
+       PlaceRef { val, layout }
    }
    // FIXME(eddyb) pass something else for the name so no work is done
@@ -78,7 +102,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
-               self.llextra.unwrap()
+               self.val.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
@@ -97,21 +121,27 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
-       let effective_field_align = self.align.restrict_for_offset(offset);
+       let effective_field_align = self.val.align.restrict_for_offset(offset);
        // `simple` is called when we don't need to adjust the offset to
        // the dynamic alignment of the field.
        let mut simple = || {
            let llval = if offset.bytes() == 0 {
-               self.llval
+               self.val.llval
            } else {
-               bx.inbounds_ptradd(self.llval, bx.const_usize(offset.bytes()))
+               bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
            };
            PlaceRef {
-               llval,
-               llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+               val: PlaceValue {
+                   llval,
+                   llextra: if bx.cx().type_has_metadata(field.ty) {
+                       self.val.llextra
+                   } else {
+                       None
+                   },
+                   align: effective_field_align,
+               },
                layout: field,
-               align: effective_field_align,
            }
        };
@@ -142,7 +172,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.
-       let meta = self.llextra;
+       let meta = self.val.llextra;
        let unaligned_offset = bx.cx().const_usize(offset.bytes());
@@ -164,9 +194,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        debug!("struct_field_ptr: DST field offset: {:?}", offset);
        // Adjust pointer.
-       let ptr = bx.inbounds_ptradd(self.llval, offset);
-       PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
+       let ptr = bx.inbounds_ptradd(self.val.llval, offset);
+       let val =
+           PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
+       PlaceRef { val, layout: field }
    }
    /// Obtain the actual discriminant of a value.
@@ -312,10 +343,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
-               bx.store(
+               bx.store_to_place(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
-                   ptr.llval,
-                   ptr.align,
+                   ptr.val,
                );
            }
            Variants::Multiple {
@@ -357,14 +387,16 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        };
        PlaceRef {
-           llval: bx.inbounds_gep(
-               bx.cx().backend_type(self.layout),
-               self.llval,
-               &[bx.cx().const_usize(0), llindex],
-           ),
-           llextra: None,
+           val: PlaceValue {
+               llval: bx.inbounds_gep(
+                   bx.cx().backend_type(self.layout),
+                   self.val.llval,
+                   &[bx.cx().const_usize(0), llindex],
+               ),
+               llextra: None,
+               align: self.val.align.restrict_for_offset(offset),
+           },
            layout,
-           align: self.align.restrict_for_offset(offset),
        }
    }
@@ -389,11 +421,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    }
    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-       bx.lifetime_start(self.llval, self.layout.size);
+       bx.lifetime_start(self.val.llval, self.layout.size);
    }
    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-       bx.lifetime_end(self.llval, self.layout.size);
+       bx.lifetime_end(self.val.llval, self.layout.size);
    }
 }
@@ -461,8 +493,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                if subslice.layout.is_unsized() {
                    assert!(from_end, "slice subslices should be `from_end`");
-                   subslice.llextra =
-                       Some(bx.sub(cg_base.llextra.unwrap(), bx.cx().const_usize(from + to)));
+                   subslice.val.llextra = Some(
+                       bx.sub(cg_base.val.llextra.unwrap(), bx.cx().const_usize(from + to)),
+                   );
                }
                subslice
```


```diff
@@ -68,13 +68,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
-                   OperandValue::Ref(llref, None, align) => {
-                       let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+                   OperandValue::Ref(val) => {
+                       if val.llextra.is_some() {
+                           bug!("unsized coercion on an unsized rvalue");
+                       }
+                       let source = PlaceRef { val, layout: operand.layout };
                        base::coerce_unsized_into(bx, source, dest);
                    }
-                   OperandValue::Ref(_, Some(_), _) => {
-                       bug!("unsized coercion on an unsized rvalue");
-                   }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
@@ -95,20 +95,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                }
                if let OperandValue::Immediate(v) = cg_elem.val {
-                   let start = dest.llval;
+                   let start = dest.val.llval;
                    let size = bx.const_usize(dest.layout.size.bytes());
                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                        let fill = bx.cx().const_u8(0);
-                       bx.memset(start, fill, size, dest.align, MemFlags::empty());
+                       bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                        return;
                    }
                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
-                       bx.memset(start, v, size, dest.align, MemFlags::empty());
+                       bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                        return;
                    }
                }
@@ -182,7 +182,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                // When we have immediate(s), the alignment of the source is irrelevant,
                // so we can store them using the destination's alignment.
-               src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
+               src.val.store(
+                   bx,
+                   PlaceRef::new_sized_aligned(dst.val.llval, src.layout, dst.val.align),
+               );
            }
        }
    }
@@ -217,10 +220,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        let cast_kind = self.value_kind(cast);
        match operand.val {
-           OperandValue::Ref(ptr, meta, align) => {
-               debug_assert_eq!(meta, None);
+           OperandValue::Ref(source_place_val) => {
+               debug_assert_eq!(source_place_val.llextra, None);
                debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-               let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
+               let fake_place = PlaceRef { val: source_place_val, layout: cast };
                Some(bx.load_operand(fake_place).val)
            }
            OperandValue::ZeroSized => {
@@ -375,7 +378,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
-           indirect_dest.llval, rvalue
+           indirect_dest.val.llval, rvalue
        );
        match *rvalue {
@@ -487,7 +490,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            }
            mir::CastKind::DynStar => {
                let (lldata, llextra) = match operand.val {
-                   OperandValue::Ref(_, _, _) => todo!(),
+                   OperandValue::Ref(..) => todo!(),
                    OperandValue::Immediate(v) => (v, None),
                    OperandValue::Pair(v, l) => (v, Some(l)),
                    OperandValue::ZeroSized => bug!("ZST -- which is not PointerLike -- in DynStar"),
@@ -765,9 +768,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                // Note: places are indirect, so storing the `llval` into the
                // destination effectively creates a reference.
                let val = if !bx.cx().type_has_metadata(ty) {
-                   OperandValue::Immediate(cg_place.llval)
+                   OperandValue::Immediate(cg_place.val.llval)
                } else {
-                   OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+                   OperandValue::Pair(cg_place.val.llval, cg_place.val.llextra.unwrap())
                };
                OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
            }
```


```diff
@@ -12,7 +12,7 @@ use crate::common::{
     AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
 };
 use crate::mir::operand::{OperandRef, OperandValue};
-use crate::mir::place::PlaceRef;
+use crate::mir::place::{PlaceRef, PlaceValue};
 use crate::MemFlags;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
@@ -156,6 +156,10 @@ pub trait BuilderMethods<'a, 'tcx>:
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
+   fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
+       debug_assert_eq!(place.llextra, None);
+       self.load(ty, place.llval, place.align)
+   }
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
        -> OperandRef<'tcx, Self::Value>;
@@ -171,6 +175,10 @@ pub trait BuilderMethods<'a, 'tcx>:
    fn nonnull_metadata(&mut self, load: Self::Value);
    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
+   fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
+       debug_assert_eq!(place.llextra, None);
+       self.store(val, place.llval, place.align)
+   }
    fn store_with_flags(
        &mut self,
        val: Self::Value,
@@ -290,14 +298,14 @@ pub trait BuilderMethods<'a, 'tcx>:
        src: PlaceRef<'tcx, Self::Value>,
        flags: MemFlags,
    ) {
-       debug_assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
-       debug_assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
+       debug_assert!(src.val.llextra.is_none(), "cannot directly copy from unsized values");
+       debug_assert!(dst.val.llextra.is_none(), "cannot directly copy into unsized values");
        debug_assert_eq!(dst.layout.size, src.layout.size);
        if flags.contains(MemFlags::NONTEMPORAL) {
            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
            let ty = self.backend_type(dst.layout);
-           let val = self.load(ty, src.llval, src.align);
-           self.store_with_flags(val, dst.llval, dst.align, flags);
+           let val = self.load_from_place(ty, src.val);
+           self.store_with_flags(val, dst.val.llval, dst.val.align, flags);
        } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(dst.layout)
        {
            // If we're not optimizing, the aliasing information from `memcpy`
@@ -306,7 +314,7 @@ pub trait BuilderMethods<'a, 'tcx>:
            temp.val.store_with_flags(self, dst, flags);
        } else if !dst.layout.is_zst() {
            let bytes = self.const_usize(dst.layout.size.bytes());
-           self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
+           self.memcpy(dst.val.llval, dst.val.align, src.val.llval, src.val.align, bytes, flags);
        }
    }
```