cg_ssa: remove pointee types and pointercast/bitcast-of-ptr

Erik Desjardins 2023-07-28 20:24:33 -04:00
parent b6540777fe
commit 04303cfb3a
16 changed files with 75 additions and 201 deletions
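
LLVM has completed its move to opaque pointers: every pointer value has the single type `ptr` (one per address space), with no pointee type attached. This commit propagates that into the backend-agnostic rustc_codegen_ssa traits: `type_ptr_to(ty)` and `type_ptr_to_ext(ty, addrspace)` collapse into `type_ptr()` and `type_ptr_ext(addrspace)`, the `type_i8p*` helpers go away, and every `pointercast`, `const_ptrcast`, and bitcast-of-pointer becomes a no-op that can simply be deleted. A minimal sketch of the trait-surface change (simplified signatures, not the real traits):

    // Before: the backend had to be told the pointee to build `i8*`, `i32*`, ...
    trait TypeMethodsOld {
        type Type;
        fn type_ptr_to(&self, pointee: Self::Type) -> Self::Type;
    }

    // After: one opaque pointer type per address space is all there is.
    trait TypeMethodsNew {
        type Type;
        fn type_ptr(&self) -> Self::Type;
    }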

View File

@ -27,7 +27,6 @@ use rustc_codegen_ssa::traits::{
BaseTypeMethods,
BuilderMethods,
ConstMethods,
-DerivedTypeMethods,
LayoutTypeMethods,
HasCodegen,
OverflowOp,

View File

@ -16,6 +16,10 @@ use crate::context::CodegenCx;
use crate::type_of::LayoutGccExt;
impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+pub fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+self.context.new_cast(None, val, ty)
+}
pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
bytes_in_context(self, bytes)
}
@ -242,10 +246,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
const_alloc_to_gcc(self, alloc)
}
-fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
-self.context.new_cast(None, val, ty)
-}
fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
if value.get_type() == self.bool_type.make_pointer() {
if let Some(pointee) = typ.get_pointee() {
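
The GCC backend still manipulates typed pointers internally, so it keeps `const_ptrcast` — but as an inherent method on its codegen context instead of an item of the shared `ConstMethods` trait, which generic cg_ssa code can no longer call. A toy model of that move (simplified types, illustrative names):

    trait ConstMethods {
        fn const_bitcast(&self, v: u64) -> u64; // still part of the shared API
    }
    struct GccCx;
    impl ConstMethods for GccCx {
        fn const_bitcast(&self, v: u64) -> u64 { v }
    }
    impl GccCx {
        // Inherent: available backend-locally, invisible to generic callers.
        fn const_ptrcast(&self, v: u64) -> u64 { v }
    }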

View File

@ -12,7 +12,7 @@ use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
#[cfg(feature="master")]
-use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods};
+use rustc_codegen_ssa::traits::MiscMethods;
use rustc_codegen_ssa::errors::InvalidMonomorphization;
use rustc_middle::bug;
use rustc_middle::ty::{self, Instance, Ty};

View File

@ -54,6 +54,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
self.u128_type
}
+pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ty.make_pointer()
+}
+pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+// TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
+ty.make_pointer()
+}
+pub fn type_i8p(&self) -> Type<'gcc> {
+self.type_ptr_to(self.type_i8())
+}
+pub fn type_i8p_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+self.type_ptr_to_ext(self.type_i8(), address_space)
+}
pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = Integer::approximate_align(self, align);
@ -149,13 +166,12 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
}
}
-fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
-ty.make_pointer()
+fn type_ptr(&self) -> Type<'gcc> {
+self.type_ptr_to(self.type_void())
}
-fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
-// TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
-ty.make_pointer()
+fn type_ptr_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+self.type_ptr_to_ext(self.type_void(), address_space)
}
fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
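
GCC has no opaque pointer type, so cg_gcc emulates one: the trait-level `type_ptr()` is simply "pointer to void", while the typed constructors live on as inherent helpers for the backend's own use. A minimal model of that delegation (illustrative, not the real gccjit API):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum GccType {
        PtrTo(&'static str), // GCC pointers still name a pointee internally
    }

    fn type_ptr_to(pointee: &'static str) -> GccType {
        GccType::PtrTo(pointee) // inherent helper, backend-local
    }

    fn type_ptr() -> GccType {
        type_ptr_to("void") // what the shared trait method now hands out
    }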

View File

@ -1,6 +1,6 @@
//! Code that is useful in various codegen modules.
-use crate::consts::{self, const_alloc_to_llvm};
+use crate::consts::const_alloc_to_llvm;
pub use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
use crate::type_::Type;
@ -304,10 +304,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
const_alloc_to_llvm(self, alloc)
}
-fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-consts::ptrcast(val, ty)
-}
fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
self.const_bitcast(val, ty)
}

View File

@ -193,10 +193,6 @@ fn check_and_apply_linkage<'ll, 'tcx>(
}
}
-pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-unsafe { llvm::LLVMConstPointerCast(val, ty) }
-}
impl<'ll> CodegenCx<'ll, '_> {
pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstBitCast(val, ty) }

View File

@ -69,14 +69,6 @@ impl<'ll> CodegenCx<'ll, '_> {
unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
}
-pub(crate) fn type_ptr(&self) -> &'ll Type {
-self.type_ptr_ext(AddressSpace::DATA)
-}
-pub(crate) fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
-unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
-}
pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
unsafe {
let n_args = llvm::LLVMCountParamTypes(ty) as usize;
@ -191,12 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
}
-fn type_ptr_to(&self, _ty: &'ll Type) -> &'ll Type {
-self.type_ptr()
+fn type_ptr(&self) -> &'ll Type {
+self.type_ptr_ext(AddressSpace::DATA)
}
-fn type_ptr_to_ext(&self, _ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-self.type_ptr_ext(address_space)
+fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
+unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
}
fn element_type(&self, ty: &'ll Type) -> &'ll Type {
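
On the LLVM side the crate-private helpers become the trait implementation itself: an opaque pointer type is identified by nothing but its address space. A standalone sketch of the same LLVM-C call through the llvm-sys bindings (an assumed dependency here, and it needs an LLVM new enough to default to opaque pointers):

    use llvm_sys::core::{LLVMContextCreate, LLVMContextDispose, LLVMPointerTypeInContext};

    fn main() {
        unsafe {
            let llcx = LLVMContextCreate();
            let ptr = LLVMPointerTypeInContext(llcx, 0); // `ptr`
            let ptr_as1 = LLVMPointerTypeInContext(llcx, 1); // `ptr addrspace(1)`
            assert_ne!(ptr, ptr_as1); // one pointer type per address space
            LLVMContextDispose(llcx);
        }
    }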

View File

@ -165,50 +165,27 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
if let Some(entry_idx) = vptr_entry_idx {
-let ptr_ty = cx.type_i8p();
+let ptr_ty = cx.type_ptr();
let ptr_align = cx.tcx().data_layout.pointer_align.abi;
-let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
-let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
let gep = bx.inbounds_gep(
ptr_ty,
-llvtable,
+old_info,
&[bx.const_usize(u64::try_from(entry_idx).unwrap())],
);
let new_vptr = bx.load(ptr_ty, gep, ptr_align);
bx.nonnull_metadata(new_vptr);
// VTable loads are invariant.
bx.set_invariant_load(new_vptr);
-bx.pointercast(new_vptr, vtable_ptr_ty)
+new_vptr
} else {
old_info
}
}
-(_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
-let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
-cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
-}
+(_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()),
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
-// Returns the vtable pointer type of a `dyn` or `dyn*` type
-fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
-cx: &Cx,
-target: Ty<'tcx>,
-kind: ty::DynKind,
-) -> <Cx as BackendTypes>::Type {
-cx.scalar_pair_element_backend_type(
-cx.layout_of(match kind {
-// vtable is the second field of `*mut dyn Trait`
-ty::Dyn => Ty::new_mut_ptr(cx.tcx(), target),
-// vtable is the second field of `dyn* Trait`
-ty::DynStar => target,
-}),
-1,
-true,
-)
-}
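
The deleted `vtable_ptr_ty` helper existed only to name the type that vtable pointers were `pointercast` to; with a single `ptr` type there is nothing left to cast, so the loaded slot *is* the new vtable pointer. A backend-free picture of the upcast slot read (hypothetical helper, not compiler code):

    /// The `inbounds_gep` + invariant `load` from the hunk above: fetch the
    /// target trait's vtable pointer stored at `entry_idx` in the old vtable.
    unsafe fn upcast_vptr(vtable: *const *const (), entry_idx: usize) -> *const () {
        *vtable.add(entry_idx)
    }
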
/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
@ -222,8 +199,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
(&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
-let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
-(bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
+(src, unsized_info(bx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
@ -248,11 +224,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
assert_eq!(result, None);
result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
}
-let (lldata, llextra) = result.unwrap();
-let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
-let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
-// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-(bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
+result.unwrap()
}
_ => bug!("unsize_ptr: called on bad types"),
}
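
`unsize_ptr` now returns the data pointer untouched and only attaches the metadata; the scalar-pair bitcast HACK disappears because both halves already have the right backend type (`ptr`, plus a `usize` or `ptr` for the extra part). Safe Rust shows the same shape of the operation:

    // Unsizing keeps the data pointer and adds metadata; nothing is cast.
    fn unsize_slice<T, const N: usize>(x: &[T; N]) -> &[T] {
        x // same data pointer; the compiler supplies the length `N`
    }
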
@ -271,11 +243,9 @@ pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
"destination type must be a dyn*"
);
-// FIXME(dyn-star): We can remove this when all supported LLVMs use opaque ptrs only.
-let unit_ptr = bx.cx().type_ptr_to(bx.cx().type_struct(&[], false));
let src = match bx.cx().type_kind(bx.cx().backend_type(src_ty_and_layout)) {
-TypeKind::Pointer => bx.pointercast(src, unit_ptr),
-TypeKind::Integer => bx.inttoptr(src, unit_ptr),
+TypeKind::Pointer => src,
+TypeKind::Integer => bx.inttoptr(src, bx.type_ptr()),
// FIXME(dyn-star): We probably have to do a bitcast first, then inttoptr.
kind => bug!("unexpected TypeKind for left-hand side of `dyn*` cast: {kind:?}"),
};
@ -398,11 +368,6 @@ pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
if flags == MemFlags::empty()
&& let Some(bty) = bx.cx().scalar_copy_backend_type(layout)
{
-// I look forward to only supporting opaque pointers
-let pty = bx.type_ptr_to(bty);
-let src = bx.pointercast(src, pty);
-let dst = bx.pointercast(dst, pty);
let temp = bx.load(bty, src, src_align);
bx.store(temp, dst, dst_align);
} else {
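
The scalar-copy fast path in `memcpy_ty` loses its casts the same way: source and destination are already plain pointers, so the copy is one typed load followed by one store. As a raw-pointer sketch (illustrative only):

    // What the fast path amounts to after the change.
    unsafe fn copy_scalar<T: Copy>(src: *const T, dst: *mut T) {
        let tmp = src.read(); // bx.load(bty, src, src_align)
        dst.write(tmp);       // bx.store(temp, dst, dst_align)
    }
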
@ -456,7 +421,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
// The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
// depending on whether the target needs `argc` and `argv` to be passed in.
let llfty = if cx.sess().target.main_needs_argc_argv {
-cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
} else {
cx.type_func(&[], cx.type_int())
};
@ -490,7 +455,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx.insert_reference_to_gdb_debug_scripts_section_global();
let isize_ty = cx.type_isize();
-let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+let ptr_ty = cx.type_ptr();
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
@ -509,12 +474,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let i8_ty = cx.type_i8();
let arg_sigpipe = bx.const_u8(sigpipe);
-let start_ty =
-cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, ptr_ty, i8_ty], isize_ty);
(start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
} else {
debug!("using user-defined start fn");
-let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+let start_ty = cx.type_func(&[isize_ty, ptr_ty], isize_ty);
(rust_main, start_ty, vec![arg_argc, arg_argv])
};
@ -541,7 +505,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
} else {
// The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
let arg_argc = bx.const_int(cx.type_int(), 0);
-let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+let arg_argv = bx.const_null(cx.type_ptr());
(arg_argc, arg_argv)
}
}
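
In the entry-point wrapper, `char **argv` used to be spelled `type_ptr_to(type_i8p())`; neither level of pointee matters anymore, so a single `type_ptr()` covers `argv` and the null `argv` of the no-argument case alike. For reference, the shape being matched, written as plain Rust FFI (names illustrative):

    use std::os::raw::{c_char, c_int};

    // int main(int argc, char **argv) — at the IR level `argv` is just `ptr`.
    extern "C" fn main_like(_argc: c_int, _argv: *const *const c_char) -> c_int {
        0
    }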

View File

@ -23,7 +23,6 @@ impl<'a, 'tcx> VirtualIndex {
// Load the data pointer from the object.
debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
let llty = bx.fn_ptr_backend_type(fn_abi);
-let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
&& bx.cx().sess().lto() == Lto::Fat
@ -33,7 +32,7 @@ impl<'a, 'tcx> VirtualIndex {
.unwrap();
let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
-bx.pointercast(func, llty)
+func
} else {
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
@ -54,7 +53,6 @@ impl<'a, 'tcx> VirtualIndex {
debug!("get_int({:?}, {:?})", llvtable, self);
let llty = bx.type_isize();
-let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
let usize_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
let ptr = bx.load(llty, gep, usize_align);

View File

@ -439,8 +439,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
};
let ty = bx.cast_backend_type(cast_ty);
-let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
-bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi)
}
};
bx.ret(llval);
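
For `PassMode::Cast` returns, the spill slot was previously pointercast to a pointer-to-cast-type before the load; now the load simply names the cast type while the pointer operand stays untyped. In raw-pointer Rust the remaining "cast" is a zero-cost compile-time annotation, which is exactly why `pointercast` could be deleted (sketch; assumes `slot` is suitably aligned):

    unsafe fn load_as<T>(slot: *const u8) -> T {
        slot.cast::<T>().read() // `.cast()` emits no instruction at all
    }
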
@ -853,9 +852,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(intrinsic) => {
let dest = match ret_dest {
_ if fn_abi.ret.is_indirect() => llargs[0],
-ReturnDest::Nothing => {
-bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
-}
+ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
ReturnDest::DirectOperand(_) => {
bug!("Cannot use direct operand with an intrinsic call")
@ -1428,8 +1425,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty, _) = &arg.mode {
let llty = bx.cast_backend_type(ty);
-let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
-llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
@ -1634,7 +1630,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// represents that this is a catch-all block.
bx = Bx::build(self.cx, cp_llbb);
let null =
-bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
+bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
let sixty_four = bx.const_i32(64);
funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
} else {

View File

@ -270,7 +270,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
sym::const_allocate => {
// returns a null pointer at runtime.
-bx.const_null(bx.type_i8p())
+bx.const_null(bx.type_ptr())
}
sym::const_deallocate => {
@ -310,14 +310,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let weak = instruction == "cxchgweak";
-let mut dst = args[0].immediate();
+let dst = args[0].immediate();
let mut cmp = args[1].immediate();
let mut src = args[2].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
-let ptr_llty = bx.type_ptr_to(bx.type_isize());
-dst = bx.pointercast(dst, ptr_llty);
cmp = bx.ptrtoint(cmp, bx.type_isize());
src = bx.ptrtoint(src, bx.type_isize());
}
@ -342,13 +340,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let layout = bx.layout_of(ty);
let size = layout.size;
-let mut source = args[0].immediate();
+let source = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first...
let llty = bx.type_isize();
-let ptr_llty = bx.type_ptr_to(llty);
-source = bx.pointercast(source, ptr_llty);
let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
// ... and then cast the result back to a pointer
bx.inttoptr(result, bx.backend_type(layout))
@ -365,12 +361,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
let size = bx.layout_of(ty).size;
let mut val = args[1].immediate();
-let mut ptr = args[0].immediate();
+let ptr = args[0].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
-let ptr_llty = bx.type_ptr_to(bx.type_isize());
-ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
@ -409,13 +403,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let ty = fn_args.type_at(0);
if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
-let mut ptr = args[0].immediate();
+let ptr = args[0].immediate();
let mut val = args[1].immediate();
if ty.is_unsafe_ptr() {
// Some platforms do not support atomic operations on pointers,
// so we cast to integer first.
-let ptr_llty = bx.type_ptr_to(bx.type_isize());
-ptr = bx.pointercast(ptr, ptr_llty);
val = bx.ptrtoint(val, bx.type_isize());
}
bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
@ -470,10 +462,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
if !fn_abi.ret.is_ignore() {
-if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
-let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
-let ptr = bx.pointercast(result.llval, ptr_llty);
-bx.store(llval, ptr, result.align);
+if let PassMode::Cast(..) = &fn_abi.ret.mode {
+bx.store(llval, result.llval, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
.val
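
Throughout the atomic intrinsics the pattern is the same: the memory location needs no cast anymore, only the pointer *values* still round-trip through `usize` on targets whose atomics work on integers. The conceptual shape, with `AtomicUsize` standing in for the generated code (assumes pointer-sized `usize`):

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn atomic_store_ptr(slot: &AtomicUsize, val: *mut u8) {
        slot.store(val as usize, Ordering::SeqCst); // `ptrtoint` on the value
    }

    fn atomic_load_ptr(slot: &AtomicUsize) -> *mut u8 {
        slot.load(Ordering::SeqCst) as *mut u8 // `inttoptr` on the result
    }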

View File

@ -2,7 +2,6 @@ use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};
use crate::base;
-use crate::common::TypeKind;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;
@ -132,7 +131,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> Self {
let alloc_align = alloc.inner().align;
assert_eq!(alloc_align, layout.align.abi);
-let ty = bx.type_ptr_to(bx.cx().backend_type(layout));
let read_scalar = |start, size, s: abi::Scalar, ty| {
let val = alloc
@ -156,7 +154,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
let size = s.size(bx);
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-let val = read_scalar(Size::ZERO, size, s, ty);
+let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
OperandRef { val: OperandValue::Immediate(val), layout }
}
Abi::ScalarPair(
@ -187,7 +185,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
let base_addr = bx.static_addr_of(init, alloc_align, None);
let llval = bx.const_ptr_byte_offset(base_addr, offset);
-let llval = bx.const_bitcast(llval, ty);
bx.load_operand(PlaceRef::new_sized(llval, layout))
}
}
@ -314,38 +311,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) => {
// Bools in union fields needs to be truncated.
*llval = bx.to_immediate(*llval, field);
-// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-let ty = bx.cx().immediate_backend_type(field);
-if bx.type_kind(ty) == TypeKind::Pointer {
-*llval = bx.pointercast(*llval, ty);
-}
}
(OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
// Bools in union fields needs to be truncated.
*a = bx.to_immediate_scalar(*a, a_abi);
*b = bx.to_immediate_scalar(*b, b_abi);
-// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
-let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
-if bx.type_kind(a_ty) == TypeKind::Pointer {
-*a = bx.pointercast(*a, a_ty);
-}
-if bx.type_kind(b_ty) == TypeKind::Pointer {
-*b = bx.pointercast(*b, b_ty);
-}
}
// Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
(OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
assert!(matches!(self.layout.abi, Abi::Vector { .. }));
-let llty = bx.cx().backend_type(self.layout);
let llfield_ty = bx.cx().backend_type(field);
// Can't bitcast an aggregate, so round trip through memory.
-let lltemp = bx.alloca(llfield_ty, field.align.abi);
-let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+let llptr = bx.alloca(llfield_ty, field.align.abi);
bx.store(*llval, llptr, field.align.abi);
-*llval = bx.load(llfield_ty, lltemp, field.align.abi);
+*llval = bx.load(llfield_ty, llptr, field.align.abi);
}
(OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
bug!()
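
The "newtype vector of array" branch keeps its spill-to-memory trick — aggregates cannot be bitcast, so the value is stored to a stack slot and reloaded at the field's type — but the slot pointer no longer needs a cast in between. A standalone version of the trick (sketch; assumes equal sizes and plain-old-data types):

    use std::mem::{size_of, MaybeUninit};

    unsafe fn round_trip<Src: Copy, Dst: Copy>(v: Src) -> Dst {
        assert_eq!(size_of::<Src>(), size_of::<Dst>());
        let mut slot = MaybeUninit::<Src>::uninit();  // alloca
        slot.as_mut_ptr().write(v);                   // store
        slot.as_ptr().cast::<Dst>().read_unaligned()  // load at the other type
    }
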
@ -380,9 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
} else {
-let bty = bx.cx().backend_type(layout);
-let ptr_bty = bx.cx().type_ptr_to(bty);
-OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+let ptr = bx.cx().type_ptr();
+OperandValue::Ref(bx.const_poison(ptr), None, layout.align.abi)
}
}
@ -434,8 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let ty = bx.backend_type(dest.layout);
-let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
-let val = bx.load(ty, ptr, source_align);
+let val = bx.load(ty, r, source_align);
bx.store_with_flags(val, dest.llval, dest.align, flags);
return;
}

View File

@ -115,8 +115,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
// ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
-let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
}
Abi::Scalar(_) | Abi::ScalarPair(..) => {
// All fields of Scalar and ScalarPair layouts must have been handled by this point.
@ -133,8 +132,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
}
};
PlaceRef {
-// HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
-llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+llval,
llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
layout: field,
align: effective_field_align,
@ -194,20 +192,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
debug!("struct_field_ptr: DST field offset: {:?}", offset);
-// Cast and adjust pointer.
-let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+// Adjust pointer.
+let ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]);
-// Finally, cast back to the type expected.
-let ll_fty = bx.cx().backend_type(field);
-debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-PlaceRef {
-llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
-llextra: self.llextra,
-layout: field,
-align: effective_field_align,
-}
+PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
}
/// Obtain the actual discriminant of a value.
@ -416,11 +404,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
-// Cast to the appropriate variant struct type.
-let variant_ty = bx.cx().backend_type(downcast.layout);
-downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
downcast
}
@ -431,11 +414,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self {
let mut downcast = *self;
downcast.layout = bx.cx().layout_of(ty);
-// Cast to the appropriate type.
-let variant_ty = bx.cx().backend_type(downcast.layout);
-downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
downcast
}
@ -515,13 +493,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
));
}
-// Cast the place pointer type to the new
-// array or slice type (`*[%_; new_len]`).
-subslice.llval = bx.pointercast(
-subslice.llval,
-bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
-);
subslice
}
mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
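
After this change a place projection never touches the pointer's type: field access is a byte-wise `gep`, and downcasts or subslice projections just swap the layout while reusing the same `llval`. Backend-free sketch (hypothetical helper, not the real PlaceRef API):

    /// A field projection is now pure pointer arithmetic.
    unsafe fn project_field(base: *mut u8, byte_offset: usize) -> *mut u8 {
        base.add(byte_offset) // `gep i8`; no cast to the field's pointer type
    }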

View File

@ -182,9 +182,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Immediate(..) | OperandValue::Pair(..) => {
// When we have immediate(s), the alignment of the source is irrelevant,
// so we can store them using the destination's alignment.
-let llty = bx.backend_type(src.layout);
-let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
-src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
}
}
}
@ -222,9 +220,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
OperandValue::Ref(ptr, meta, align) => {
debug_assert_eq!(meta, None);
debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-let cast_bty = bx.backend_type(cast);
-let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
-let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
+let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
Some(bx.load_operand(fake_place).val)
}
OperandValue::ZeroSized => {
@ -480,18 +476,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
{
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if bx.cx().is_backend_scalar_pair(cast) {
-let data_cast = bx.pointercast(
-data_ptr,
-bx.cx().scalar_pair_element_backend_type(cast, 0, true),
-);
-OperandValue::Pair(data_cast, meta)
+OperandValue::Pair(data_ptr, meta)
} else {
-// cast to thin-ptr
-// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
-// pointer-cast of that pointer to desired pointer type.
-let llcast_ty = bx.cx().immediate_backend_type(cast);
-let llval = bx.pointercast(data_ptr, llcast_ty);
-OperandValue::Immediate(llval)
+// Cast of fat-ptr to thin-ptr is an extraction of data-ptr.
+OperandValue::Immediate(data_ptr)
}
} else {
bug!("unexpected non-pair operand");
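
The fat-pointer-to-thin-pointer cast reduces to extracting the data half of the pair — there is no longer a second step. Plain Rust shows the same operation:

    // Dropping the metadata is the entire cast; no pointer cast is emitted.
    fn thin(fat: *const [u8]) -> *const u8 {
        fat as *const u8
    }
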
@ -736,13 +724,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
let operand = self.codegen_operand(bx, operand);
-let lloperand = operand.immediate();
+let val = operand.immediate();
let content_ty = self.monomorphize(content_ty);
let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
-let llty_ptr = bx.cx().backend_type(box_layout);
-let val = bx.pointercast(lloperand, llty_ptr);
OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
}
}
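
`ShallowInitBox` likewise becomes a pure layout change: the operand's pointer is reused as the box's immediate value without any cast. The type-level picture in plain Rust (illustrative; real codegen does not call `Box::from_raw`):

    fn shallow_init_box<T>(raw: *mut T) -> Box<T> {
        unsafe { Box::from_raw(raw) } // same pointer value, new type/layout
    }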

View File

@ -36,7 +36,6 @@ pub trait ConstMethods<'tcx>: BackendTypes {
fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
-fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn const_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value;
}

View File

@ -26,8 +26,8 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
fn type_kind(&self, ty: Self::Type) -> TypeKind;
-fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
-fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+fn type_ptr(&self) -> Self::Type;
+fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
fn element_type(&self, ty: Self::Type) -> Self::Type;
/// Returns the number of elements in `self` if it is a LLVM vector type.
@ -42,14 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
}
pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
-fn type_i8p(&self) -> Self::Type {
-self.type_i8p_ext(AddressSpace::DATA)
-}
-fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
-self.type_ptr_to_ext(self.type_i8(), address_space)
-}
fn type_int(&self) -> Self::Type {
match &self.sess().target.c_int_width[..] {
"16" => self.type_i16(),