diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 43cc46cfe68..ce2c18c68a8 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -974,7 +974,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         &mut self,
         place: PlaceRef<'tcx, RValue<'gcc>>,
     ) -> OperandRef<'tcx, RValue<'gcc>> {
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -999,10 +999,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             }
         }
 
-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(llextra) = place.val.llextra {
+            OperandValue::Ref(place.val.llval, Some(llextra), place.val.align)
         } else if place.layout.is_gcc_immediate() {
-            let load = self.load(place.layout.gcc_type(self), place.llval, place.align);
+            let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align);
             if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                 scalar_load_metadata(self, load, scalar);
             }
@@ -1012,9 +1012,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                 let load = self.load(llty, llptr, align);
@@ -1027,11 +1027,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             };
 
             OperandValue::Pair(
-                load(0, a, place.align),
-                load(1, b, place.align.restrict_for_offset(b_offset)),
+                load(0, a, place.val.align),
+                load(1, b, place.val.align.restrict_for_offset(b_offset)),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val.llval, None, place.val.align)
         };
 
         OperandRef { val, layout: place.layout }
@@ -1045,8 +1045,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     ) {
         let zero = self.const_usize(0);
        let count = self.const_usize(count);
-        let start = dest.project_index(self, zero).llval;
-        let end = dest.project_index(self, count).llval;
+        let start = dest.project_index(self, zero).val.llval;
+        let end = dest.project_index(self, count).val.llval;
 
         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -1064,7 +1064,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cond_br(keep_going, body_bb, next_bb);
 
         self.switch_to_block(body_bb);
-        let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+        let align = dest.val.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
         cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
 
         let next = self.inbounds_gep(
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index cebd45c09aa..57cb81a8ece 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -354,7 +354,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
                 let block = self.llbb();
                 let extended_asm = block.add_extended_asm(None, "");
-                extended_asm.add_input_operand(None, "r", result.llval);
+                extended_asm.add_input_operand(None, "r", result.val.llval);
                 extended_asm.add_clobber("memory");
                 extended_asm.set_volatile_flag(true);
 
@@ -388,8 +388,8 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
-                let ptr = self.pointercast(result.llval, ptr_llty);
-                self.store(llval, ptr, result.align);
+                let ptr = self.pointercast(result.val.llval, ptr_llty);
+                self.store(llval, ptr, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -511,7 +511,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
                 let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
-                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+                let cast_dst = bx.pointercast(dst.val.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
@@ -539,7 +539,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
 
                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 60361a44c2d..6039a4aaf01 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -82,7 +82,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             let place = PlaceRef::alloca(bx, args[0].layout);
             args[0].val.store(bx, place);
             let int_ty = bx.type_ix(expected_bytes * 8);
-            let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+            let ptr = bx.pointercast(place.val.llval, bx.cx.type_ptr_to(int_ty));
             bx.load(int_ty, ptr, Align::ONE)
         }
         _ => return_error!(InvalidMonomorphization::InvalidBitmask {
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index f918facc86d..cd9e5515958 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -233,7 +233,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 bx.store(val, llscratch, scratch_align);
                 // ... and then memcpy it to the intended destination.
                 bx.memcpy(
-                    dst.llval,
+                    dst.val.llval,
                     self.layout.align.abi,
                     llscratch,
                     scratch_align,
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index b7235972204..6819c22698c 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -535,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 panic!("unsized locals must not be `extern` types");
             }
         }
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -579,13 +579,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }
 
-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(llextra) = place.val.llextra {
+            OperandValue::Ref(place.val.llval, Some(llextra), place.val.align)
         } else if place.layout.is_llvm_immediate() {
             let mut const_llval = None;
             let llty = place.layout.llvm_type(self);
             unsafe {
-                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                     if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                         if let Some(init) = llvm::LLVMGetInitializer(global) {
                             if self.val_ty(init) == llty {
@@ -596,7 +596,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(llty, place.llval, place.align);
+                let load = self.load(llty, place.val.llval, place.val.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
@@ -608,9 +608,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -619,11 +619,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             };
 
             OperandValue::Pair(
-                load(0, a, place.layout, place.align, Size::ZERO),
-                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+                load(0, a, place.layout, place.val.align, Size::ZERO),
+                load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val.llval, None, place.val.align)
         };
 
         OperandRef { val, layout: place.layout }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index dc52dd156b7..a661a7f489d 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     llvm::LLVMSetAlignment(load, align);
                 }
                 if !result.layout.is_zst() {
-                    self.store(load, result.llval, result.align);
+                    self.store(load, result.val.llval, result.val.align);
                 }
                 return Ok(());
             }
@@ -428,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::black_box => {
                 args[0].val.store(self, result);
 
-                let result_val_span = [result.llval];
+                let result_val_span = [result.val.llval];
                 // We need to "use" the argument in some way LLVM can't introspect, and on
                 // targets that support it we can typically leverage inline assembly to do
                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -482,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.llval, result.align);
+                self.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -1065,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             let place = PlaceRef::alloca(bx, args[0].layout);
             args[0].val.store(bx, place);
             let int_ty = bx.type_ix(expected_bytes * 8);
-            bx.load(int_ty, place.llval, Align::ONE)
+            bx.load(int_ty, place.val.llval, Align::ONE)
         }
         _ => return_error!(InvalidMonomorphization::InvalidBitmask {
             span,
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index c3137f0628e..fbcdea47ebf 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -1,6 +1,6 @@
 use super::operand::OperandRef;
 use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized};
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{CachedLlbb, FunctionCx, LocalRef};
 
 use crate::base;
@@ -242,7 +242,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
                 bx.switch_to_block(fx.llbb(target));
                 fx.set_debug_loc(bx, self.terminator.source_info);
                 for tmp in copied_constant_arguments {
-                    bx.lifetime_end(tmp.llval, tmp.layout.size);
+                    bx.lifetime_end(tmp.val.llval, tmp.layout.size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
             }
@@ -256,7 +256,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
 
             if let Some((ret_dest, target)) = destination {
                 for tmp in copied_constant_arguments {
-                    bx.lifetime_end(tmp.llval, tmp.layout.size);
+                    bx.lifetime_end(tmp.val.llval, tmp.layout.size);
                 }
                 fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                 self.funclet_br(fx, bx, target, mergeable_succ)
@@ -431,7 +431,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let va_list_arg_idx = self.fn_abi.args.len();
                 match self.locals[mir::Local::from_usize(1 + va_list_arg_idx)] {
                     LocalRef::Place(va_list) => {
-                        bx.va_end(va_list.llval);
+                        bx.va_end(va_list.val.llval);
                     }
                    _ => bug!("C-variadic function must have a `VaList` place"),
                 }
@@ -467,7 +467,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     LocalRef::Operand(op) => op,
                     LocalRef::PendingOperand => bug!("use of return before def"),
                     LocalRef::Place(cg_place) => OperandRef {
-                        val: Ref(cg_place.llval, None, cg_place.align),
+                        val: Ref(cg_place.val.llval, None, cg_place.val.align),
                         layout: cg_place.layout,
                     },
                     LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
@@ -476,7 +476,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     Immediate(_) | Pair(..) => {
                         let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
                         op.val.store(bx, scratch);
-                        scratch.llval
+                        scratch.val.llval
                     }
                     Ref(llval, _, align) => {
                         assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
@@ -512,11 +512,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         let place = self.codegen_place(bx, location.as_ref());
         let (args1, args2);
-        let mut args = if let Some(llextra) = place.llextra {
-            args2 = [place.llval, llextra];
+        let mut args = if let Some(llextra) = place.val.llextra {
+            args2 = [place.val.llval, llextra];
             &args2[..]
         } else {
-            args1 = [place.llval];
+            args1 = [place.val.llval];
             &args1[..]
         };
         let (drop_fn, fn_abi, drop_instance) =
@@ -918,7 +918,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            let dest = match ret_dest {
                _ if fn_abi.ret.is_indirect() => llargs[0],
                ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
-                ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+                ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.val.llval,
                ReturnDest::DirectOperand(_) => {
                    bug!("Cannot use direct operand with an intrinsic call")
                }
@@ -951,7 +951,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
                Ok(()) => {
                    if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                        self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                        self.store_return(bx, ret_dest, &fn_abi.ret, dst.val.llval);
                    }
 
                    return if let Some(target) = target {
@@ -1058,16 +1058,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                    }
                    let place = op.deref(bx.cx());
-                    let data_ptr = place.project_field(bx, 0);
-                    let meta_ptr = place.project_field(bx, 1);
-                    let meta = bx.load_operand(meta_ptr);
+                    let data_place = place.project_field(bx, 0);
+                    let meta_place = place.project_field(bx, 1);
+                    let meta = bx.load_operand(meta_place);
                    llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                        bx,
                        meta.immediate(),
                        op.layout.ty,
                        fn_abi,
                    ));
-                    llargs.push(data_ptr.llval);
+                    llargs.push(data_place.val.llval);
                    continue;
                }
                _ => {
@@ -1082,9 +1082,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                (&mir::Operand::Copy(_), Ref(_, None, _))
                | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
                    let tmp = PlaceRef::alloca(bx, op.layout);
-                    bx.lifetime_start(tmp.llval, tmp.layout.size);
+                    bx.lifetime_start(tmp.val.llval, tmp.layout.size);
                    op.val.store(bx, tmp);
-                    op.val = Ref(tmp.llval, None, tmp.align);
+                    op.val = Ref(tmp.val.llval, None, tmp.val.align);
                    copied_constant_arguments.push(tmp);
                }
                _ => {}
@@ -1450,12 +1450,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    };
                    let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
                    op.val.store(bx, scratch);
-                    (scratch.llval, scratch.align, true)
+                    (scratch.val.llval, scratch.val.align, true)
                }
                PassMode::Cast { .. } => {
                    let scratch = PlaceRef::alloca(bx, arg.layout);
                    op.val.store(bx, scratch);
-                    (scratch.llval, scratch.align, true)
+                    (scratch.val.llval, scratch.val.align, true)
                }
                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
            },
@@ -1470,9 +1470,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    // alignment requirements may be higher than the type's alignment, so copy
                    // to a higher-aligned alloca.
                    let scratch = PlaceRef::alloca_aligned(bx, arg.layout, required_align);
-                    let op_place = PlaceRef { llval, llextra, layout: op.layout, align };
+                    let op_place = PlaceRef {
+                        val: PlaceValue { llval, llextra, align },
+                        layout: op.layout,
+                    };
                    bx.typed_place_copy(scratch, op_place);
-                    (scratch.llval, scratch.align, true)
+                    (scratch.val.llval, scratch.val.align, true)
                } else {
                    (llval, align, true)
                }
@@ -1490,7 +1493,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    // a pointer for `repr(C)` structs even when empty, so get
                    // one from an `alloca` (which can be left uninitialized).
                    let scratch = PlaceRef::alloca(bx, arg.layout);
-                    (scratch.llval, scratch.align, true)
+                    (scratch.val.llval, scratch.val.align, true)
                }
                _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"),
            },
@@ -1782,7 +1785,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                // but the calling convention has an indirect return.
                let tmp = PlaceRef::alloca(bx, fn_ret.layout);
                tmp.storage_live(bx);
-                llargs.push(tmp.llval);
+                llargs.push(tmp.val.llval);
                ReturnDest::IndirectOperand(tmp, index)
            } else if intrinsic.is_some() {
                // Currently, intrinsics always need a location to store
@@ -1803,7 +1806,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            self.codegen_place(bx, mir::PlaceRef { local: dest.local, projection: dest.projection })
        };
        if fn_ret.is_indirect() {
-            if dest.align < dest.layout.align.abi {
+            if dest.val.align < dest.layout.align.abi {
                // Currently, MIR code generation does not create calls
                // that store directly to fields of packed structs (in
                // fact, the calls it creates write only to temps).
@@ -1812,7 +1815,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                // to create a temporary.
                span_bug!(self.mir.span, "can't directly store to unaligned value");
            }
-            llargs.push(dest.llval);
+            llargs.push(dest.val.llval);
            ReturnDest::Nothing
        } else {
            ReturnDest::Store(dest)
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
index 0387c430d32..3f3c738984e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -252,7 +252,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        // at least for the cases which LLVM handles correctly.
        let spill_slot = PlaceRef::alloca(bx, operand.layout);
        if let Some(name) = name {
-            bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+            bx.set_var_name(spill_slot.val.llval, &(name + ".dbg.spill"));
        }
        operand.val.store(bx, spill_slot);
        spill_slot
@@ -331,7 +331,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        if let Some(name) = &name {
            match local_ref {
                LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
-                    bx.set_var_name(place.llval, name);
+                    bx.set_var_name(place.val.llval, name);
                }
                LocalRef::Operand(operand) => match operand.val {
                    OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
@@ -417,16 +417,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                let ptr_ty = Ty::new_mut_ptr(bx.tcx(), place.layout.ty);
                let ptr_layout = bx.layout_of(ptr_ty);
                let alloca = PlaceRef::alloca(bx, ptr_layout);
-                bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+                bx.set_var_name(alloca.val.llval, &(var.name.to_string() + ".dbg.spill"));
 
                // Write the pointer to the variable
-                bx.store(place.llval, alloca.llval, alloca.align);
+                bx.store(place.val.llval, alloca.val.llval, alloca.val.align);
 
                // Point the debug info to `*alloca` for the current variable
                bx.dbg_var_addr(
                    dbg_var,
                    dbg_loc,
-                    alloca.llval,
+                    alloca.val.llval,
                    Size::ZERO,
                    &[Size::ZERO],
                    var.fragment,
@@ -435,7 +435,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                bx.dbg_var_addr(
                    dbg_var,
                    dbg_loc,
-                    base.llval,
+                    base.val.llval,
                    direct_offset,
                    &indirect_offsets,
                    var.fragment,
@@ -553,7 +553,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                let base = Self::spill_operand_to_stack(operand, Some(var.name.to_string()), bx);
 
-                bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[], fragment);
+                bx.dbg_var_addr(
+                    dbg_var,
+                    dbg_loc,
+                    base.val.llval,
+                    Size::ZERO,
+                    &[],
+                    fragment,
+                );
            }
        }
    }
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 3e6cf0ece29..c1df2467ea9 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -387,9 +387,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                    let success = bx.from_immediate(success);
 
                    let dest = result.project_field(bx, 0);
-                    bx.store(val, dest.llval, dest.align);
+                    bx.store(val, dest.val.llval, dest.val.align);
                    let dest = result.project_field(bx, 1);
-                    bx.store(success, dest.llval, dest.align);
+                    bx.store(success, dest.val.llval, dest.val.align);
                } else {
                    invalid_monomorphization(ty);
                }
@@ -511,7 +511,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                bx.store(llval, result.llval, result.align);
+                bx.store(llval, result.val.llval, result.val.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
index 1f4473d2ec4..d30ce1fc9fa 100644
--- a/compiler/rustc_codegen_ssa/src/mir/mod.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -336,7 +336,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 
            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
-                bx.va_start(va_list.llval);
+                bx.va_start(va_list.val.llval);
                return LocalRef::Place(va_list);
            }
 
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index ac38b91c5e0..d91869d622e 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -1,4 +1,4 @@
-use super::place::PlaceRef;
+use super::place::{PlaceRef, PlaceValue};
 use super::{FunctionCx, LocalRef};
 
 use crate::size_of_val;
@@ -221,7 +221,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
            OperandValue::ZeroSized => bug!("Deref of ZST operand {:?}", self),
        };
        let layout = cx.layout_of(projected_ty);
-        PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+        let val = PlaceValue { llval: llptr, llextra, align: layout.align.abi };
+        PlaceRef { val, layout }
    }
 
    /// If this operand is a `Pair`, we return an aggregate with the two values.
@@ -409,10 +410,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
                // value is through `undef`/`poison`, and the store itself is useless.
            }
-            OperandValue::Ref(llval, llextra @ None, source_align) => {
+            OperandValue::Ref(llval, None, source_align) => {
                assert!(dest.layout.is_sized(), "cannot directly store unsized values");
-                let source_place =
-                    PlaceRef { llval, llextra, align: source_align, layout: dest.layout };
+                let source_place = PlaceRef {
+                    val: PlaceValue::new_sized(llval, source_align),
+                    layout: dest.layout,
+                };
                bx.typed_place_copy_with_flags(dest, source_place, flags);
            }
            OperandValue::Ref(_, Some(_), _) => {
@@ -420,7 +423,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
            }
            OperandValue::Immediate(s) => {
                let val = bx.from_immediate(s);
-                bx.store_with_flags(val, dest.llval, dest.align, flags);
+                bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
            }
            OperandValue::Pair(a, b) => {
                let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
@@ -429,12 +432,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
 
                let val = bx.from_immediate(a);
-                let align = dest.align;
-                bx.store_with_flags(val, dest.llval, align, flags);
+                let align = dest.val.align;
+                bx.store_with_flags(val, dest.val.llval, align, flags);
 
-                let llptr = bx.inbounds_ptradd(dest.llval, bx.const_usize(b_offset.bytes()));
+                let llptr = bx.inbounds_ptradd(dest.val.llval, bx.const_usize(b_offset.bytes()));
                let val = bx.from_immediate(b);
-                let align = dest.align.restrict_for_offset(b_offset);
+                let align = dest.val.align.restrict_for_offset(b_offset);
                bx.store_with_flags(val, llptr, align, flags);
            }
        }
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 1ec6c351e25..24da5ca435d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -12,25 +12,48 @@ use rustc_middle::ty::{self, Ty};
 use rustc_target::abi::{Align, FieldsShape, Int, Pointer, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};
 
+/// The location and extra runtime properties of the place.
+///
+/// Typically found in a [`PlaceRef`] or an [`OperandValue::Ref`].
 #[derive(Copy, Clone, Debug)]
-pub struct PlaceRef<'tcx, V> {
+pub struct PlaceValue<V> {
     /// A pointer to the contents of the place.
     pub llval: V,
 
     /// This place's extra data if it is unsized, or `None` if null.
     pub llextra: Option<V>,
 
-    /// The monomorphized type of this place, including variant information.
-    pub layout: TyAndLayout<'tcx>,
-
     /// The alignment we know for this place.
     pub align: Align,
 }
 
+impl<V: CodegenObject> PlaceValue<V> {
+    /// Constructor for the ordinary case of `Sized` types.
+    ///
+    /// Sets `llextra` to `None`.
+    pub fn new_sized(llval: V, align: Align) -> PlaceValue<V> {
+        PlaceValue { llval, llextra: None, align }
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+    /// The location and extra runtime properties of the place.
+    pub val: PlaceValue<V>,
+
+    /// The monomorphized type of this place, including variant information.
+    ///
+    /// You probably shouldn't use the alignment from this layout;
+    /// rather you should use the `.val.align` of the actual place,
+    /// which might be different from the type's normal alignment.
+    pub layout: TyAndLayout<'tcx>,
+}
+
 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
         assert!(layout.is_sized());
-        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+        let val = PlaceValue::new_sized(llval, layout.align.abi);
+        PlaceRef { val, layout }
     }
 
     pub fn new_sized_aligned(
@@ -39,7 +62,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         align: Align,
     ) -> PlaceRef<'tcx, V> {
         assert!(layout.is_sized());
-        PlaceRef { llval, llextra: None, layout, align }
+        let val = PlaceValue::new_sized(llval, align);
+        PlaceRef { val, layout }
     }
 
     // FIXME(eddyb) pass something else for the name so no work is done
@@ -78,7 +102,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         if let FieldsShape::Array { count, .. } = self.layout.fields {
             if self.layout.is_unsized() {
                 assert_eq!(count, 0);
-                self.llextra.unwrap()
+                self.val.llextra.unwrap()
             } else {
                 cx.const_usize(count)
             }
@@ -97,21 +121,27 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let field = self.layout.field(bx.cx(), ix);
         let offset = self.layout.fields.offset(ix);
-        let effective_field_align = self.align.restrict_for_offset(offset);
+        let effective_field_align = self.val.align.restrict_for_offset(offset);
 
         // `simple` is called when we don't need to adjust the offset to
         // the dynamic alignment of the field.
         let mut simple = || {
             let llval = if offset.bytes() == 0 {
-                self.llval
+                self.val.llval
             } else {
-                bx.inbounds_ptradd(self.llval, bx.const_usize(offset.bytes()))
+                bx.inbounds_ptradd(self.val.llval, bx.const_usize(offset.bytes()))
             };
             PlaceRef {
-                llval,
-                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+                val: PlaceValue {
+                    llval,
+                    llextra: if bx.cx().type_has_metadata(field.ty) {
+                        self.val.llextra
+                    } else {
+                        None
+                    },
+                    align: effective_field_align,
+                },
                 layout: field,
-                align: effective_field_align,
             }
         };
 
@@ -142,7 +172,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
         // the `y` field has 16-bit alignment.
 
-        let meta = self.llextra;
+        let meta = self.val.llextra;
 
         let unaligned_offset = bx.cx().const_usize(offset.bytes());
 
@@ -164,9 +194,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         debug!("struct_field_ptr: DST field offset: {:?}", offset);
 
         // Adjust pointer.
-        let ptr = bx.inbounds_ptradd(self.llval, offset);
-
-        PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
+        let ptr = bx.inbounds_ptradd(self.val.llval, offset);
+        let val =
+            PlaceValue { llval: ptr, llextra: self.val.llextra, align: effective_field_align };
+        PlaceRef { val, layout: field }
     }
 
     /// Obtain the actual discriminant of a value.
@@ -314,8 +345,8 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                     self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                 bx.store(
                     bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
-                    ptr.llval,
-                    ptr.align,
+                    ptr.val.llval,
+                    ptr.val.align,
                 );
             }
             Variants::Multiple {
@@ -357,14 +388,16 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         };
 
         PlaceRef {
-            llval: bx.inbounds_gep(
-                bx.cx().backend_type(self.layout),
-                self.llval,
-                &[bx.cx().const_usize(0), llindex],
-            ),
-            llextra: None,
+            val: PlaceValue {
+                llval: bx.inbounds_gep(
+                    bx.cx().backend_type(self.layout),
+                    self.val.llval,
+                    &[bx.cx().const_usize(0), llindex],
+                ),
+                llextra: None,
+                align: self.val.align.restrict_for_offset(offset),
+            },
             layout,
-            align: self.align.restrict_for_offset(offset),
         }
     }
@@ -389,11 +422,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     }
 
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-        bx.lifetime_start(self.llval, self.layout.size);
+        bx.lifetime_start(self.val.llval, self.layout.size);
     }
 
     pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
-        bx.lifetime_end(self.llval, self.layout.size);
+        bx.lifetime_end(self.val.llval, self.layout.size);
     }
 }
@@ -461,8 +494,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 if subslice.layout.is_unsized() {
                     assert!(from_end, "slice subslices should be `from_end`");
-                    subslice.llextra =
-                        Some(bx.sub(cg_base.llextra.unwrap(), bx.cx().const_usize(from + to)));
+                    subslice.val.llextra = Some(
+                        bx.sub(cg_base.val.llextra.unwrap(), bx.cx().const_usize(from + to)),
+                    );
                 }
 
                 subslice
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index d62f560f11f..6f1a76e7834 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -95,20 +95,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
 
         if let OperandValue::Immediate(v) = cg_elem.val {
-            let start = dest.llval;
+            let start = dest.val.llval;
             let size = bx.const_usize(dest.layout.size.bytes());
 
             // Use llvm.memset.p0i8.* to initialize all zero arrays
             if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                 let fill = bx.cx().const_u8(0);
-                bx.memset(start, fill, size, dest.align, MemFlags::empty());
+                bx.memset(start, fill, size, dest.val.align, MemFlags::empty());
                 return;
             }
 
             // Use llvm.memset.p0i8.* to initialize byte arrays
             let v = bx.from_immediate(v);
             if bx.cx().val_ty(v) == bx.cx().type_i8() {
-                bx.memset(start, v, size, dest.align, MemFlags::empty());
+                bx.memset(start, v, size, dest.val.align, MemFlags::empty());
                 return;
             }
         }
@@ -182,7 +182,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                 // When we have immediate(s), the alignment of the source is irrelevant,
                 // so we can store them using the destination's alignment.
-                src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
+                src.val.store(
+                    bx,
+                    PlaceRef::new_sized_aligned(dst.val.llval, src.layout, dst.val.align),
+                );
             }
         }
     }
@@ -375,7 +378,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) {
         debug!(
             "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
-            indirect_dest.llval, rvalue
+            indirect_dest.val.llval, rvalue
         );
 
         match *rvalue {
@@ -765,9 +768,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Note: places are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
                 let val = if !bx.cx().type_has_metadata(ty) {
-                    OperandValue::Immediate(cg_place.llval)
+                    OperandValue::Immediate(cg_place.val.llval)
                 } else {
-                    OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+                    OperandValue::Pair(cg_place.val.llval, cg_place.val.llextra.unwrap())
                 };
                 OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
             }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index c0281e75d9d..f1073e0bbc4 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -290,14 +290,14 @@ pub trait BuilderMethods<'a, 'tcx>:
         src: PlaceRef<'tcx, Self::Value>,
         flags: MemFlags,
     ) {
-        debug_assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
-        debug_assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
+        debug_assert!(src.val.llextra.is_none(), "cannot directly copy from unsized values");
+        debug_assert!(dst.val.llextra.is_none(), "cannot directly copy into unsized values");
         debug_assert_eq!(dst.layout.size, src.layout.size);
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
             let ty = self.backend_type(dst.layout);
-            let val = self.load(ty, src.llval, src.align);
-            self.store_with_flags(val, dst.llval, dst.align, flags);
+            let val = self.load(ty, src.val.llval, src.val.align);
+            self.store_with_flags(val, dst.val.llval, dst.val.align, flags);
         } else if self.sess().opts.optimize == OptLevel::No && self.is_backend_immediate(dst.layout)
         {
             // If we're not optimizing, the aliasing information from `memcpy`
@@ -306,7 +306,7 @@ pub trait BuilderMethods<'a, 'tcx>:
             temp.val.store_with_flags(self, dst, flags);
         } else if !dst.layout.is_zst() {
             let bytes = self.const_usize(dst.layout.size.bytes());
-            self.memcpy(dst.llval, dst.align, src.llval, src.align, bytes, flags);
+            self.memcpy(dst.val.llval, dst.val.align, src.val.llval, src.val.align, bytes, flags);
         }
     }
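Reviewer note (illustration only, not part of the patch): the mechanical change at every call site above is the same — the pointer/metadata/alignment triple of `PlaceRef` moves into the new `PlaceValue`, so accesses go from `place.llval`/`place.align` to `place.val.llval`/`place.val.align`. The following self-contained Rust sketch models that split; `Align`, `Layout`, and the `u64` value type are stand-ins for the compiler's `Align`, `TyAndLayout<'tcx>`, and backend value types, not the real definitions.

// Standalone sketch of the `PlaceValue`/`PlaceRef` split introduced by this
// patch. All types here are simplified stand-ins, not the rustc originals.
#[derive(Copy, Clone, Debug)]
struct Align(u64);

#[derive(Copy, Clone, Debug)]
struct Layout {
    abi_align: Align,
}

/// The value half of a place: pointer, optional unsized metadata, alignment.
#[derive(Copy, Clone, Debug)]
struct PlaceValue<V> {
    llval: V,
    llextra: Option<V>,
    align: Align,
}

impl<V> PlaceValue<V> {
    /// Ordinary `Sized` case: no extra metadata.
    fn new_sized(llval: V, align: Align) -> Self {
        PlaceValue { llval, llextra: None, align }
    }
}

/// A place plus its monomorphized layout, mirroring the new two-level shape.
#[derive(Copy, Clone, Debug)]
struct PlaceRef<V> {
    val: PlaceValue<V>,
    layout: Layout,
}

fn main() {
    let layout = Layout { abi_align: Align(8) };
    // Before the patch, call sites built one flat literal,
    // `PlaceRef { llval, llextra, layout, align }`; now the value half is
    // constructed first and the layout attached separately.
    let place = PlaceRef { val: PlaceValue::new_sized(0x1000_u64, layout.abi_align), layout };
    // Accesses change from `place.llval` / `place.align` to the nested form:
    println!("ptr = {:#x}, align = {:?}", place.val.llval, place.val.align);
    println!("whole place: {place:?}");
}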