Start implementing proper constants

Also rework ADTs to store their size
khyperia 2020-09-03 15:35:54 +02:00
parent 582258a873
commit 04b4f8aa88
4 changed files with 283 additions and 105 deletions
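For orientation, here is a minimal standalone sketch of the layout rule this commit adopts for SpirvType::Adt: field offsets are byte offsets, fields are packed back to back, and the total size is None when the last field is unsized. The function and test values below are illustrative stand-ins, not the crate's auto_struct_layout itself (which works on CodegenCx and Word type handles).

// Simplified model of auto_struct_layout: each field's size in bytes
// (None = unsized, only allowed as the last field) yields a byte offset,
// and the running offset becomes the struct size, or None if unsized.
fn layout_fields(field_sizes: &[Option<u32>]) -> (Vec<u32>, Option<u32>) {
    let mut field_offsets = Vec::with_capacity(field_sizes.len());
    let mut offset = Some(0u32);
    for &size in field_sizes {
        let this_offset = offset.expect("Unsized values can only be the last field in a struct");
        field_offsets.push(this_offset);
        offset = size.map(|s| this_offset + s);
    }
    (field_offsets, offset)
}

fn main() {
    // (u32, u8, [u8]) packs to offsets [0, 4, 5] with no total size.
    assert_eq!(layout_fields(&[Some(4), Some(1), None]), (vec![0, 4, 5], None));
    // (u32, u32) packs to offsets [0, 4] with a total size of 8 bytes.
    assert_eq!(layout_fields(&[Some(4), Some(4)]), (vec![0, 4], Some(8)));
}

Like the helper in the diff, this model ignores alignment padding; fields are laid out contiguously.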

View File

@ -7,6 +7,7 @@ use rustc_middle::ty::{Ty, TyKind};
use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
use rustc_target::abi::{Abi, FieldsShape, LayoutOf, Primitive, Scalar, Variants};
use std::fmt;
use std::iter::once;
#[derive(Clone, Debug, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub enum SpirvType {
@ -17,10 +18,11 @@ pub enum SpirvType {
/// This uses the rustc definition of "adt", i.e. a struct, enum, or union
Adt {
name: String,
// TODO: enums/unions
/// Size of the struct, in *bytes*
size: Option<u32>,
field_types: Vec<Word>,
/// *byte* offsets
field_offsets: Option<Vec<u32>>,
field_offsets: Vec<u32>,
field_names: Option<Vec<String>>,
},
Opaque {
@ -34,6 +36,9 @@ pub enum SpirvType {
element: Word,
count: Word,
},
RuntimeArray {
element: Word,
},
Pointer {
storage_class: StorageClass,
pointee: Word,
@ -108,6 +113,7 @@ impl SpirvType {
SpirvType::Float(width) => cx.emit_global().type_float(width),
SpirvType::Adt {
ref name,
size: _,
ref field_types,
ref field_offsets,
ref field_names,
@ -117,15 +123,14 @@ impl SpirvType {
let id = emit.id();
let result = emit.type_struct_id(Some(id), field_types.iter().cloned());
emit.name(result, name);
if let Some(field_offsets) = field_offsets {
for (index, offset) in field_offsets.iter().copied().enumerate() {
emit.member_decorate(
result,
index as u32,
Decoration::Offset,
[Operand::LiteralInt32(offset)].iter().cloned(),
);
}
// The struct size is only used by our own sizeof_in_bits() (e.g. for the ArrayStride decoration)
for (index, offset) in field_offsets.iter().copied().enumerate() {
emit.member_decorate(
result,
index as u32,
Decoration::Offset,
[Operand::LiteralInt32(offset)].iter().cloned(),
);
}
if let Some(field_names) = field_names {
for (index, field_name) in field_names.iter().enumerate() {
@ -136,7 +141,22 @@ impl SpirvType {
}
SpirvType::Opaque { ref name } => cx.emit_global().type_opaque(name),
SpirvType::Vector { element, count } => cx.emit_global().type_vector(element, count),
SpirvType::Array { element, count } => cx.emit_global().type_array(element, count),
SpirvType::Array { element, count } => {
// The ArrayStride decoration wants the stride in *bytes*
let element_size = cx
.lookup_type(element)
.sizeof_in_bits(cx)
.expect("Element of sized array must be sized")
/ 8;
let result = cx.emit_global().type_array(element, count);
cx.emit_global().decorate(
result,
Decoration::ArrayStride,
once(Operand::LiteralInt32(element_size as u32)),
);
result
}
SpirvType::RuntimeArray { element } => cx.emit_global().type_runtime_array(element),
SpirvType::Pointer {
storage_class,
pointee,
@ -175,30 +195,27 @@ impl SpirvType {
SpirvTypePrinter { ty: self, cx }
}
pub fn sizeof_in_bits<'spv, 'tcx>(&self, cx: &CodegenCx<'spv, 'tcx>) -> usize {
match *self {
pub fn sizeof_in_bits<'spv, 'tcx>(&self, cx: &CodegenCx<'spv, 'tcx>) -> Option<usize> {
let result = match *self {
SpirvType::Void => 0,
SpirvType::Bool => 1,
SpirvType::Integer(width, _) => width as usize,
SpirvType::Float(width) => width as usize,
SpirvType::Adt {
ref field_types, ..
} => field_types
.iter()
.map(|&ty| cx.lookup_type(ty).sizeof_in_bits(cx))
.sum(),
SpirvType::Adt { size, .. } => (size? * 8) as usize,
SpirvType::Opaque { .. } => 0,
SpirvType::Vector { element, count } => {
cx.lookup_type(element).sizeof_in_bits(cx)
cx.lookup_type(element).sizeof_in_bits(cx)?
* cx.builder.lookup_const_u64(count).unwrap() as usize
}
SpirvType::Array { element, count } => {
cx.lookup_type(element).sizeof_in_bits(cx)
cx.lookup_type(element).sizeof_in_bits(cx)?
* cx.builder.lookup_const_u64(count).unwrap() as usize
}
SpirvType::RuntimeArray { .. } => return None,
SpirvType::Pointer { .. } => cx.tcx.data_layout.pointer_size.bits() as usize,
SpirvType::Function { .. } => cx.tcx.data_layout.pointer_size.bits() as usize,
}
};
Some(result)
}
pub fn memset_const_pattern<'spv, 'tcx>(
@ -245,6 +262,9 @@ impl SpirvType {
cx.emit_global()
.constant_composite(self.def(cx), vec![elem_pat; count])
}
SpirvType::RuntimeArray { .. } => {
panic!("memset on runtime arrays not implemented yet")
}
SpirvType::Pointer { .. } => panic!("memset on pointers not implemented yet"),
SpirvType::Function { .. } => panic!("memset on functions not implemented yet"),
}
@ -286,6 +306,9 @@ impl SpirvType {
)
.unwrap()
}
SpirvType::RuntimeArray { .. } => {
panic!("memset on runtime arrays not implemented yet")
}
SpirvType::Pointer { .. } => panic!("memset on pointers not implemented yet"),
SpirvType::Function { .. } => panic!("memset on functions not implemented yet"),
}
@ -310,6 +333,7 @@ impl fmt::Debug for SpirvTypePrinter<'_, '_, '_> {
SpirvType::Float(width) => f.debug_struct("Float").field("width", &width).finish(),
SpirvType::Adt {
ref name,
size,
ref field_types,
ref field_offsets,
ref field_names,
@ -320,6 +344,7 @@ impl fmt::Debug for SpirvTypePrinter<'_, '_, '_> {
.collect::<Vec<_>>();
f.debug_struct("Adt")
.field("name", &name)
.field("size", &size)
.field("field_types", &fields)
.field("field_offsets", field_offsets)
.field("field_names", field_names)
@ -352,6 +377,10 @@ impl fmt::Debug for SpirvTypePrinter<'_, '_, '_> {
.expect("Array type has invalid count value"),
)
.finish(),
SpirvType::RuntimeArray { element } => f
.debug_struct("RuntimeArray")
.field("element", &self.cx.debug_type(element))
.finish(),
SpirvType::Pointer {
storage_class,
pointee,
@ -389,6 +418,7 @@ impl fmt::Display for SpirvTypePrinter<'_, '_, '_> {
SpirvType::Float(width) => write!(f, "f{}", width),
SpirvType::Adt {
ref name,
size: _,
ref field_types,
field_offsets: _,
ref field_names,
@ -420,6 +450,10 @@ impl fmt::Display for SpirvTypePrinter<'_, '_, '_> {
let len = len.expect("Array type has invalid count value");
write!(f, "[{}; {}]", elem, len)
}
SpirvType::RuntimeArray { element } => {
let elem = self.cx.debug_type(element);
write!(f, "[{}]", elem)
}
SpirvType::Pointer {
storage_class,
pointee,
@ -518,10 +552,12 @@ impl<'spv, 'tcx> ConvSpirvType<'spv, 'tcx> for CastTarget {
args.push(SpirvType::Integer(rem_bytes as u32 * 8, false).def(cx));
}
let (field_offsets, size) = auto_struct_layout(cx, &args);
SpirvType::Adt {
name: "<cast_target>".to_string(),
size,
field_types: args,
field_offsets: None,
field_offsets,
field_names: None,
}
.def(cx)
@ -608,8 +644,9 @@ fn trans_type_impl<'spv, 'tcx>(
// An empty struct is zero-sized
return SpirvType::Adt {
name: "<zst>".to_string(),
size: Some(0),
field_types: Vec::new(),
field_offsets: None,
field_offsets: Vec::new(),
field_names: None,
}
.def(cx);
@ -627,10 +664,12 @@ fn trans_type_impl<'spv, 'tcx>(
// Note! Do not pass through is_immediate here - they're wrapped in a struct, hence, not immediate.
let one_spirv = trans_scalar_pair_impl(cx, ty, one, 0, false);
let two_spirv = trans_scalar_pair_impl(cx, ty, two, 1, false);
let (field_offsets, size) = auto_struct_layout(cx, &[one_spirv, two_spirv]);
SpirvType::Adt {
name: format!("{}", ty.ty),
size,
field_types: vec![one_spirv, two_spirv],
field_offsets: None,
field_offsets,
field_names: None,
}
.def(cx)
@ -778,7 +817,10 @@ fn trans_scalar_generic<'spv, 'tcx>(
// the right type at the use site.
SpirvType::Pointer {
storage_class: StorageClass::Generic,
pointee: SpirvType::Integer(8, false).def(cx),
pointee: SpirvType::Opaque {
name: "<unknown_ptr>".to_string(),
}
.def(cx),
}
.def(cx)
}
@ -793,7 +835,8 @@ fn trans_aggregate<'spv, 'tcx>(cx: &CodegenCx<'spv, 'tcx>, ty: &TyAndLayout<'tcx
),
// TODO: Is this the right thing to do?
FieldsShape::Union(_field_count) => {
assert_ne!(ty.size.bytes(), 0);
assert_ne!(ty.size.bytes(), 0, "{:#?}", ty);
assert!(!ty.is_unsized(), "{:#?}", ty);
let byte = SpirvType::Integer(8, false).def(cx);
let count = cx.constant_u32(ty.size.bytes() as u32).def;
SpirvType::Array {
@ -803,17 +846,30 @@ fn trans_aggregate<'spv, 'tcx>(cx: &CodegenCx<'spv, 'tcx>, ty: &TyAndLayout<'tcx
.def(cx)
}
FieldsShape::Array { stride: _, count } => {
// spir-v doesn't support zero-sized arrays
// note that zero-sized arrays don't report as .is_zst() for some reason? TODO: investigate why
let nonzero_count = if count == 0 { 1 } else { count };
// TODO: Assert stride is same as spirv's stride?
let element_type = ty.field(cx, 0).spirv_type(cx);
let count_const = cx.constant_u32(nonzero_count as u32).def;
SpirvType::Array {
element: element_type,
count: count_const,
if ty.is_unsized() {
// In principle the array itself could be sized while its element is unsized, e.g. `[[u8]; 5]`.
// However, I think Rust disallows all such cases, so assert that here.
assert_eq!(count, 0);
SpirvType::RuntimeArray {
element: element_type,
}
.def(cx)
} else {
// note that zero-sized arrays don't report as .is_zst() for some reason? TODO: investigate why
assert_ne!(
count, 0,
"spir-v doesn't support zero-sized arrays: {:#?}",
ty
);
// TODO: Assert stride is same as spirv's stride?
let count_const = cx.constant_u32(count as u32).def;
SpirvType::Array {
element: element_type,
count: count_const,
}
.def(cx)
}
.def(cx)
}
FieldsShape::Arbitrary {
offsets: _,
@ -822,6 +878,22 @@ fn trans_aggregate<'spv, 'tcx>(cx: &CodegenCx<'spv, 'tcx>, ty: &TyAndLayout<'tcx
}
}
// Returns (field_offsets in bytes, total size in bytes; None if the last field is unsized)
pub fn auto_struct_layout<'spv, 'tcx>(
cx: &CodegenCx<'spv, 'tcx>,
field_types: &[Word],
) -> (Vec<u32>, Option<u32>) {
let mut field_offsets = Vec::with_capacity(field_types.len());
let mut offset = Some(0);
for &field_type in field_types {
let this_offset = offset.expect("Unsized values can only be the last field in a struct");
field_offsets.push(this_offset);
let field_size_bits = cx.lookup_type(field_type).sizeof_in_bits(cx);
offset = field_size_bits.map(|size| this_offset + (size / 8) as u32);
}
(field_offsets, offset)
}
// see struct_llfields in librustc_codegen_llvm for implementation hints
fn trans_struct<'spv, 'tcx>(cx: &CodegenCx<'spv, 'tcx>, ty: &TyAndLayout<'tcx>) -> Word {
// TODO: enums
@ -841,6 +913,11 @@ fn trans_struct<'spv, 'tcx>(cx: &CodegenCx<'spv, 'tcx>, ty: &TyAndLayout<'tcx>)
}
other => panic!("TODO: Unimplemented TyKind in trans_struct: {:?}", other),
};
let size = if ty.is_unsized() {
None
} else {
Some(ty.size.bytes() as u32)
};
let mut field_types = Vec::new();
let mut field_offsets = Vec::new();
let mut field_names = Vec::new();
@ -870,8 +947,9 @@ fn trans_struct<'spv, 'tcx>(cx: &CodegenCx<'spv, 'tcx>, ty: &TyAndLayout<'tcx>)
}
SpirvType::Adt {
name,
size,
field_types,
field_offsets: Some(field_offsets),
field_offsets,
field_names: Some(field_names),
}
.def(cx)
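Because sizeof_in_bits now returns Option<usize>, every size computation has to pick a policy for unsized types: propagate None with `?` (as the Array and Vector arms do above) or `.expect(...)` where an unsized value cannot occur. A minimal standalone sketch of that shape, using stand-in types rather than the crate's SpirvType:

// Stand-in for the relevant SpirvType cases; sizes are in bits, matching
// sizeof_in_bits in the diff. RuntimeArray is the one unsized case.
enum Ty {
    Integer(u32),
    Array { elem: Box<Ty>, count: usize },
    RuntimeArray { elem: Box<Ty> },
}

fn sizeof_in_bits(ty: &Ty) -> Option<usize> {
    let result = match ty {
        Ty::Integer(width) => *width as usize,
        // Propagate None from the element, as the real Array/Vector arms do.
        Ty::Array { elem, count } => sizeof_in_bits(elem)? * *count,
        // The unsized case: callers must handle None.
        Ty::RuntimeArray { .. } => return None,
    };
    Some(result)
}

fn main() {
    let arr = Ty::Array { elem: Box::new(Ty::Integer(32)), count: 4 };
    assert_eq!(sizeof_in_bits(&arr), Some(128));
    let slice = Ty::RuntimeArray { elem: Box::new(Ty::Integer(8)) };
    assert_eq!(sizeof_in_bits(&slice), None);
}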

View File

@ -441,7 +441,9 @@ impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
pointee,
} => match self.lookup_type(pointee) {
SpirvType::Adt { field_types, .. } => (storage_class, field_types[idx as usize]),
SpirvType::Array { element, .. } => (storage_class, element),
SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element, .. } => {
(storage_class, element)
}
other => panic!(
"struct_gep not on struct or array type: {:?}, index {}",
other, idx
@ -747,7 +749,11 @@ impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
let pat = elem_ty_spv
.memset_const_pattern(self, fill_byte)
.with_type(elem_ty);
let count = size / (elem_ty_spv.sizeof_in_bits(self) / 8);
let count = size
/ (elem_ty_spv
.sizeof_in_bits(self)
.expect("Memset on unsized values not supported")
/ 8);
if count == 1 {
self.store(pat, ptr, Align::from_bytes(0).unwrap());
} else {
@ -769,7 +775,11 @@ impl<'a, 'spv, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'spv, 'tcx> {
let pat = elem_ty_spv
.memset_dynamic_pattern(self, fill_byte.def)
.with_type(elem_ty);
let count = size / (elem_ty_spv.sizeof_in_bits(self) / 8);
let count = size
/ (elem_ty_spv
.sizeof_in_bits(self)
.expect("Memset on unsized values not supported")
/ 8);
if count == 1 {
self.store(pat, ptr, Align::from_bytes(0).unwrap());
} else {

View File

@ -85,7 +85,7 @@ impl<'a, 'spv, 'tcx> Builder<'a, 'spv, 'tcx> {
for index in indices.iter().cloned().skip(1) {
result_indices.push(index.def);
result_pointee_type = match self.lookup_type(result_pointee_type) {
SpirvType::Array { element, count: _ } => element,
SpirvType::Array { element, .. } | SpirvType::RuntimeArray { element } => element,
_ => panic!(
"GEP not implemented for type {}",
self.debug_type(result_pointee_type)

View File
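The constant-building code below walks an Allocation with a mutable byte offset, decoding one leaf value at a time and asserting at the end that the whole allocation was consumed. A minimal standalone model of that cursor pattern (the byte buffer and helper name are illustrative; the real read_alloc_val goes through read_target_uint with the target's endianness and also checks relocations):

// Offset-cursor read, as in read_alloc_val: consume `len` bytes at *offset,
// decode them (little-endian assumed here), and advance the cursor.
fn read_uint(bytes: &[u8], offset: &mut usize, len: usize) -> u128 {
    let mut v = 0u128;
    for i in 0..len {
        v |= (bytes[*offset + i] as u128) << (8 * i);
    }
    *offset += len;
    v
}

fn main() {
    // A packed (u16, u32) constant: fields at byte offsets 0 and 2.
    let bytes = [0x34u8, 0x12, 0x78, 0x56, 0x00, 0x00];
    let mut offset = 0;
    assert_eq!(read_uint(&bytes, &mut offset, 2), 0x1234);
    assert_eq!(read_uint(&bytes, &mut offset, 4), 0x5678);
    // Like create_const_alloc, the caller checks the whole allocation was consumed.
    assert_eq!(offset, bytes.len());
}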

@ -9,8 +9,7 @@ use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::{
AsmMethods, BackendTypes, BaseTypeMethods, ConstMethods, CoverageInfoMethods, DebugInfoMethods,
DeclareMethods, DerivedTypeMethods, LayoutTypeMethods, MiscMethods, PreDefineMethods,
StaticMethods,
DeclareMethods, LayoutTypeMethods, MiscMethods, PreDefineMethods, StaticMethods,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::GlobalAsm;
@ -168,13 +167,25 @@ impl<'spv, 'tcx> CodegenCx<'spv, 'tcx> {
self.builder.constant_u64(ty, val).with_type(ty)
}
#[allow(dead_code)]
pub fn constant_int(&self, ty: Word, val: u64) -> SpirvValue {
match self.lookup_type(ty) {
SpirvType::Integer(width, _) => {
if width > 32 {
self.builder.constant_u64(ty, val).with_type(ty)
} else {
assert!(val <= u32::MAX as u64);
self.builder.constant_u32(ty, val as u32).with_type(ty)
}
}
other => panic!("Cannot constant_int on type {}", other.debug(self)),
}
}
pub fn constant_f32(&self, val: f32) -> SpirvValue {
let ty = SpirvType::Float(32).def(self);
self.builder.constant_f32(ty, val).with_type(ty)
}
#[allow(dead_code)]
pub fn constant_f64(&self, val: f64) -> SpirvValue {
let ty = SpirvType::Float(64).def(self);
self.builder.constant_f64(ty, val).with_type(ty)
@ -379,10 +390,12 @@ impl<'spv, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'spv, 'tcx> {
.def(self)
}
fn type_struct(&self, els: &[Self::Type], _packed: bool) -> Self::Type {
let (field_offsets, size) = crate::abi::auto_struct_layout(self, &els);
SpirvType::Adt {
name: "<generated_struct>".to_string(),
size,
field_types: els.to_vec(),
field_offsets: None,
field_offsets,
field_names: None,
}
.def(self)
@ -402,6 +415,7 @@ impl<'spv, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'spv, 'tcx> {
SpirvType::Opaque { .. } => TypeKind::Struct,
SpirvType::Vector { .. } => TypeKind::Vector,
SpirvType::Array { .. } => TypeKind::Array,
SpirvType::RuntimeArray { .. } => TypeKind::Array,
SpirvType::Pointer { .. } => TypeKind::Pointer,
SpirvType::Function { .. } => TypeKind::Function,
}
@ -894,11 +908,13 @@ impl<'spv, 'tcx> ConstMethods<'tcx> for CodegenCx<'spv, 'tcx> {
}
fn const_struct(&self, elts: &[Self::Value], _packed: bool) -> Self::Value {
// Presumably this will get bitcasted to the right type?
let field_types = elts.iter().map(|f| f.ty).collect();
let field_types = elts.iter().map(|f| f.ty).collect::<Vec<_>>();
let (field_offsets, size) = crate::abi::auto_struct_layout(self, &field_types);
let struct_ty = SpirvType::Adt {
name: "<const_struct>".to_string(),
size,
field_types,
field_offsets: None,
field_offsets,
field_names: None,
}
.def(self);
@ -1012,67 +1028,141 @@ impl<'spv, 'tcx> ConstMethods<'tcx> for CodegenCx<'spv, 'tcx> {
}
}
// Directly copied from rustc_codegen_llvm consts.rs const_alloc_to_llvm
fn create_const_alloc(cx: &CodegenCx<'_, '_>, alloc: &Allocation, ty: Word) -> SpirvValue {
let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
println!("Creating const alloc of type {}", cx.debug_type(ty));
let mut offset = 0;
let result = create_const_alloc2(cx, alloc, &mut offset, ty);
assert_eq!(
offset,
alloc.len(),
"create_const_alloc must consume all bytes of an Allocation"
);
println!("Done creating alloc of type {}", cx.debug_type(ty));
result
}
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
// This `inspect` is okay since we have checked that it is not within a relocation, it
// is within the bounds of the allocation, and it doesn't affect interpreter execution
// (we inspect the result after interpreter execution). Any undef byte is replaced with
// some arbitrary byte value.
//
// FIXME: relay undef bytes to codegen as undef const bytes
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
llvals.push(const_bytes(cx, bytes));
fn create_const_alloc2(
cx: &CodegenCx<'_, '_>,
alloc: &Allocation,
offset: &mut usize,
ty: Word,
) -> SpirvValue {
println!("const at {}: {}", *offset, cx.debug_type(ty));
match cx.lookup_type(ty) {
SpirvType::Void => panic!("Cannot create const alloc of type void"),
SpirvType::Bool => match read_alloc_val(cx, alloc, offset, 1) != 0 {
true => cx.emit_global().constant_true(ty),
false => cx.emit_global().constant_false(ty),
}
.with_type(ty),
SpirvType::Integer(width, _) => {
let v = read_alloc_val(cx, alloc, offset, (width / 8) as usize);
cx.constant_int(ty, v as u64)
}
SpirvType::Float(width) => {
let v = read_alloc_val(cx, alloc, offset, (width / 8) as usize);
match width {
32 => cx.constant_f32(f32::from_bits(v as u32)),
64 => cx.constant_f64(f64::from_bits(v as u64)),
other => panic!("invalid float width {}", other),
}
}
SpirvType::Adt {
size,
field_types,
field_offsets,
..
} => {
let base = *offset;
let values = field_types
.iter()
.zip(field_offsets.iter())
.map(|(&ty, &field_offset)| {
create_const_alloc2(cx, alloc, &mut (base + field_offset as usize), ty).def
})
.collect::<Vec<_>>();
if let Some(size) = size {
*offset += size as usize;
} else {
assert_eq!(
*offset,
alloc.len(),
"create_const_alloc must consume all bytes of an Allocation after an unsized struct"
);
}
cx.emit_global()
.constant_composite(ty, values)
.with_type(ty)
}
SpirvType::Opaque { name } => panic!("Cannot create const alloc of type opaque: {}", name),
SpirvType::Vector { element, count } | SpirvType::Array { element, count } => {
let count = cx.builder.lookup_const_u64(count).unwrap() as usize;
let values = (0..count)
.map(|_| create_const_alloc2(cx, alloc, offset, element).def)
.collect::<Vec<_>>();
cx.emit_global()
.constant_composite(ty, values)
.with_type(ty)
}
SpirvType::RuntimeArray { element } => {
let mut values = Vec::new();
while *offset != alloc.len() {
values.push(create_const_alloc2(cx, alloc, offset, element).def);
}
cx.emit_global()
.constant_composite(ty, values)
.with_type(ty)
}
SpirvType::Pointer { .. } => {
let ptr = read_alloc_ptr(cx, alloc, offset);
cx.scalar_to_backend(
ptr.into(),
&abi::Scalar {
value: Primitive::Pointer,
valid_range: 0..=!0,
},
ty,
)
}
SpirvType::Function { .. } => {
panic!("TODO: SpirvType::Function not supported yet in create_const_alloc")
}
let ptr_offset = read_target_uint(
dl.endian,
// This `inspect` is okay since it is within the bounds of the allocation, it doesn't
// affect interpreter execution (we inspect the result after interpreter execution),
// and we properly interpret the relocation as a relocation pointer offset.
alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
as u64;
let address_space = match cx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) => AddressSpace::DATA,
};
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&abi::Scalar {
value: Primitive::Pointer,
valid_range: 0..=!0,
},
cx.type_i8p_ext(address_space),
));
next_offset = offset + pointer_size;
}
if alloc.len() >= next_offset {
let range = next_offset..alloc.len();
// This `inspect` is okay since we have check that it is after all relocations, it is
// within the bounds of the allocation, and it doesn't affect interpreter execution (we
// inspect the result after interpreter execution). Any undef byte is replaced with some
// arbitrary byte value.
//
// FIXME: relay undef bytes to codegen as undef const bytes
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
llvals.push(const_bytes(cx, bytes));
}
}
let result = cx.const_struct(&llvals, true);
// TODO: hack to get things working, this *will* fail spirv-val
result.def.with_type(ty)
// Advances offset by len
fn read_alloc_val<'a>(
cx: &CodegenCx<'_, '_>,
alloc: &'a Allocation,
offset: &mut usize,
len: usize,
) -> u128 {
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(*offset..(*offset + len));
// check relocations (pointer values)
assert!({
let start = offset.saturating_sub(cx.data_layout().pointer_size.bytes() as usize - 1);
let end = *offset + len;
alloc
.relocations()
.range(Size::from_bytes(start)..Size::from_bytes(end))
.is_empty()
});
*offset += len;
read_target_uint(cx.data_layout().endian, bytes).unwrap()
}
// Advances offset by ptr size
fn read_alloc_ptr<'a>(
cx: &CodegenCx<'_, '_>,
alloc: &'a Allocation,
offset: &mut usize,
) -> Pointer {
let len = cx.data_layout().pointer_size.bytes() as usize;
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(*offset..(*offset + len));
let inner_offset = read_target_uint(cx.data_layout().endian, bytes).unwrap();
let &((), alloc_id) = alloc.relocations().get(&Size::from_bytes(*offset)).unwrap();
*offset += len;
Pointer::new_with_tag(alloc_id, Size::from_bytes(inner_offset), ())
}
fn const_bytes(cx: &CodegenCx<'_, '_>, bytes: &[u8]) -> SpirvValue {