Teach llvm backend how to fall back to default bodies

Oli Scherer 2024-01-31 20:39:59 +00:00
parent 432635a9ea
commit 9a0743747f
11 changed files with 109 additions and 103 deletions
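Note on the overall shape before the per-file diffs: `codegen_intrinsic_call` changes from returning nothing (and `bug!`-ing on unknown intrinsics) to returning `Result<(), Instance<'tcx>>`. `Ok(())` means the backend emitted code for the intrinsic itself; `Err(instance)` hands back the instance whose fallback body the caller should codegen as an ordinary call. A minimal standalone sketch of that protocol, with toy types standing in for rustc's `Instance` and builder machinery (nothing below is the actual rustc API):

    // Toy stand-in for rustc's Instance; only the control flow mirrors the commit.
    #[derive(Clone, Copy, Debug)]
    struct Instance {
        def_id: u32, // stand-in for rustc's DefId
    }

    // Backend hook: Ok(()) = "I emitted code for this intrinsic",
    // Err(inst) = "emit a plain call to `inst` (its fallback body) instead".
    fn codegen_intrinsic_call(name: &str, instance: Instance) -> Result<(), Instance> {
        match name {
            "abort" | "volatile_store" => Ok(()), // backend-implemented intrinsics
            _ => Err(instance),                   // previously: bug!("unknown intrinsic")
        }
    }

    fn codegen_call(name: &str, instance: Instance) {
        match codegen_intrinsic_call(name, instance) {
            Ok(()) => println!("{name}: handled by the backend"),
            Err(fallback) => println!("{name}: emit ordinary call to {fallback:?}"),
        }
    }

    fn main() {
        codegen_call("abort", Instance { def_id: 1 });
        codegen_call("const_allocate", Instance { def_id: 2 });
    }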

View File

@@ -396,9 +396,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
                 source_info,
             ) {
                 Ok(()) => return,
-                // Unimplemented intrinsics must have a fallback body. The fallback body is obtained
-                // by converting the `InstanceDef::Intrinsic` to an `InstanceDef::Item`.
-                Err(()) => Some(Instance::new(instance.def_id(), instance.args)),
+                Err(instance) => Some(instance),
             }
         }
         InstanceDef::DropGlue(_, None) => {
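The `Err(instance)` received here is built in the callee (next file) with `Instance::new(instance.def_id(), instance.args)`, i.e. the same `DefId` re-wrapped as an `InstanceDef::Item`, which is exactly the conversion the deleted comment described. A toy sketch of why that re-wrapping is the whole trick (stand-in types, not rustc's definitions):

    #[derive(Debug)]
    enum InstanceDef {
        Item(u32),      // ordinary function: has MIR that codegen can call
        Intrinsic(u32), // backend-magic intrinsic: no MIR of its own
    }

    #[derive(Debug)]
    struct Instance {
        def: InstanceDef,
    }

    impl Instance {
        // Mirrors the shape of rustc's `Instance::new(def_id, args)`:
        // the result is always an Item instance.
        fn new(def_id: u32) -> Self {
            Instance { def: InstanceDef::Item(def_id) }
        }

        fn def_id(&self) -> u32 {
            match self.def {
                InstanceDef::Item(id) | InstanceDef::Intrinsic(id) => id,
            }
        }
    }

    fn main() {
        let intrinsic = Instance { def: InstanceDef::Intrinsic(42) };
        // Intrinsic -> Item, same DefId: now there is a body to call.
        let fallback = Instance::new(intrinsic.def_id());
        println!("{:?} -> {:?}", intrinsic.def, fallback.def);
    }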

View File

@@ -268,7 +268,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
     destination: CPlace<'tcx>,
     target: Option<BasicBlock>,
     source_info: mir::SourceInfo,
-) -> Result<(), ()> {
+) -> Result<(), Instance<'tcx>> {
     let intrinsic = fx.tcx.item_name(instance.def_id());
     let instance_args = instance.args;

@@ -431,7 +431,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
     ret: CPlace<'tcx>,
     destination: Option<BasicBlock>,
     source_info: mir::SourceInfo,
-) -> Result<(), ()> {
+) -> Result<(), Instance<'tcx>> {
     assert_eq!(generic_args, instance.args);
     let usize_layout = fx.layout_of(fx.tcx.types.usize);

@@ -1229,14 +1229,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             ret.write_cvalue(fx, CValue::by_val(cmp, ret.layout()));
         }

-        sym::const_allocate => {
-            intrinsic_args!(fx, args => (_size, _align); intrinsic);
-
-            // returns a null pointer at runtime.
-            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
-            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
-        }
-
         sym::const_deallocate => {
             intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
             // nop at runtime.

@@ -1257,7 +1249,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
             );
         }

-        _ => return Err(()),
+        // Unimplemented intrinsics must have a fallback body. The fallback body is obtained
+        // by converting the `InstanceDef::Intrinsic` to an `InstanceDef::Item`.
+        _ => return Err(Instance::new(instance.def_id(), instance.args)),
     }

     let ret_block = fx.get_block(destination.unwrap());
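The deleted `sym::const_allocate` arm hand-emitted a null pointer (`iconst(pointer_type, 0)`); after this commit the wildcard arm returns `Err`, and the call lands on the shared fallback body added to `library/core` at the bottom of this diff, which returns `crate::ptr::null_mut()`. A plain-Rust sketch of the equivalence (toy functions, not the rustc lowering):

    // What the deleted cranelift arm emitted, morally (iconst(pointer_type, 0)):
    fn old_backend_lowering() -> *mut u8 {
        core::ptr::null_mut()
    }

    // What the shared fallback body in libcore now provides:
    const unsafe fn fallback_body(_size: usize, _align: usize) -> *mut u8 {
        core::ptr::null_mut()
    }

    fn main() {
        // Same runtime behavior, but the null-pointer logic now lives in exactly
        // one place (the library body) instead of once per backend.
        assert_eq!(old_backend_lowering(), unsafe { fallback_body(8, 8) });
    }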

View File

@@ -90,7 +90,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) ->
 }

 impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) -> Result<(), Instance<'tcx>> {
         let tcx = self.tcx;
         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

@@ -137,7 +137,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     args[2].immediate(),
                     llresult,
                 );
-                return;
+                return Ok(());
             }
             sym::breakpoint => {
                 unimplemented!();

@@ -166,12 +166,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
                 args[1].val.volatile_store(self, dst);
-                return;
+                return Ok(());
             }
             sym::unaligned_volatile_store => {
                 let dst = args[0].deref(self.cx());
                 args[1].val.unaligned_volatile_store(self, dst);
-                return;
+                return Ok(());
             }
             sym::prefetch_read_data
             | sym::prefetch_write_data

@@ -269,7 +269,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     },
                     None => {
                         tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
-                        return;
+                        return Ok(());
                     }
                 }
             }

@@ -339,7 +339,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 extended_asm.set_volatile_flag(true);

                 // We have copied the value to `result` already.
-                return;
+                return Ok(());
             }

             sym::ptr_mask => {

@@ -357,11 +357,12 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             _ if name_str.starts_with("simd_") => {
                 match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                     Ok(llval) => llval,
-                    Err(()) => return,
+                    Err(()) => return Ok(()),
                 }
             }

-            _ => bug!("unknown intrinsic '{}'", name),
+            // Fall back to default body
+            _ => return Err(Instance::new(instance.def_id(), instance.args)),
         };

         if !fn_abi.ret.is_ignore() {

@@ -376,6 +377,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 .store(self, result);
             }
         }
+        Ok(())
     }

     fn abort(&mut self) {

View File

@@ -86,7 +86,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         args: &[OperandRef<'tcx, &'ll Value>],
         llresult: &'ll Value,
         span: Span,
-    ) {
+    ) -> Result<(), ty::Instance<'tcx>> {
         let tcx = self.tcx;
         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());

@@ -141,7 +141,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     args[2].immediate(),
                     llresult,
                 );
-                return;
+                return Ok(());
             }
             sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
             sym::va_copy => {

@@ -194,17 +194,17 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 if !result.layout.is_zst() {
                     self.store(load, result.llval, result.align);
                 }
-                return;
+                return Ok(());
             }
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
                 args[1].val.volatile_store(self, dst);
-                return;
+                return Ok(());
             }
             sym::unaligned_volatile_store => {
                 let dst = args[0].deref(self.cx());
                 args[1].val.unaligned_volatile_store(self, dst);
-                return;
+                return Ok(());
             }
             sym::prefetch_read_data
             | sym::prefetch_write_data

@@ -305,7 +305,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             name,
                             ty,
                         });
-                        return;
+                        return Ok(());
                     }
                 }
             }

@@ -387,7 +387,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));

                 // We have copied the value to `result` already.
-                return;
+                return Ok(());
             }

             _ if name.as_str().starts_with("simd_") => {

@@ -395,11 +395,15 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span,
                 ) {
                     Ok(llval) => llval,
-                    Err(()) => return,
+                    Err(()) => return Ok(()),
                 }
             }

-            _ => bug!("unknown intrinsic '{}' -- should it have been lowered earlier?", name),
+            _ => {
+                debug!("unknown intrinsic '{}' -- falling back to default body", name);
+                // Call the fallback body instead of generating the intrinsic code
+                return Err(ty::Instance::new(instance.def_id(), instance.args));
+            }
         };

         if !fn_abi.ret.is_ignore() {

@@ -411,6 +415,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 .store(self, result);
             }
         }
+        Ok(())
     }

     fn abort(&mut self) {
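Both the LLVM and GCC implementations now share the same control-flow shape: arms that are pure side effects `return Ok(())` early, arms that compute a value fall through to the shared store at the bottom, and the wildcard arm returns `Err` instead of `bug!`-ing. A compact sketch of that shape (toy types; `Value` stands in for the backend's SSA value type):

    type Value = i64; // stand-in for the backend's SSA value type

    struct Instance; // stand-in for ty::Instance

    fn codegen_intrinsic_call(name: &str, instance: Instance) -> Result<(), Instance> {
        let llval: Value = match name {
            // Arms that are pure side effects finish early: nothing to store.
            "volatile_store" => return Ok(()),
            // Arms that compute a value fall through to the shared store below.
            "ptr_mask" => 0x7ff0,
            // Unknown intrinsic: ask the caller to call the fallback body.
            _ => return Err(instance),
        };
        store_result(llval); // shared tail that writes llval to the return place
        Ok(())
    }

    fn store_result(v: Value) {
        println!("store {v:#x}");
    }

    fn main() {
        assert!(codegen_intrinsic_call("ptr_mask", Instance).is_ok());
        assert!(codegen_intrinsic_call("const_allocate", Instance).is_err());
    }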

View File

@@ -837,8 +837,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             };
         }

-        match intrinsic {
-            None | Some(sym::drop_in_place) => {}
+        let instance = match intrinsic {
+            None | Some(sym::drop_in_place) => instance,
             Some(intrinsic) => {
                 let mut llargs = Vec::with_capacity(1);
                 let ret_dest = self.make_return_dest(

@@ -882,27 +882,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     })
                     .collect();

-                Self::codegen_intrinsic_call(
-                    bx,
-                    *instance.as_ref().unwrap(),
-                    fn_abi,
-                    &args,
-                    dest,
-                    span,
-                );
-
-                if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
-                }
-
-                return if let Some(target) = target {
-                    helper.funclet_br(self, bx, target, mergeable_succ)
-                } else {
-                    bx.unreachable();
-                    MergingSucc::False
-                };
+                let instance = *instance.as_ref().unwrap();
+                match Self::codegen_intrinsic_call(bx, instance, fn_abi, &args, dest, span) {
+                    Ok(()) => {
+                        if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+                            self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
+                        }
+
+                        return if let Some(target) = target {
+                            helper.funclet_br(self, bx, target, mergeable_succ)
+                        } else {
+                            bx.unreachable();
+                            MergingSucc::False
+                        };
+                    }
+                    Err(instance) => Some(instance),
+                }
             }
-        }
+        };

         let mut llargs = Vec::with_capacity(arg_count);
         let destination = target.as_ref().map(|&target| {
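This is the caller side of the protocol: when the backend returns `Ok(())`, the terminator is finished as before; when it returns `Err(instance)`, the fallback instance flows into the ordinary call codegen below instead. A toy model of the new structure (simplified: the real code threads `Option<Instance>` through):

    #[derive(Clone, Copy, Debug)]
    struct Instance(u32);

    fn codegen_intrinsic_call(inst: Instance) -> Result<(), Instance> {
        if inst.0 == 0 { Ok(()) } else { Err(inst) }
    }

    fn codegen_call_terminator(intrinsic: Option<&str>, instance: Instance) {
        // Mirrors the new `let instance = match intrinsic { ... }` structure.
        let instance = match intrinsic {
            None => instance,
            Some(_) => match codegen_intrinsic_call(instance) {
                Ok(()) => {
                    println!("intrinsic handled; store return value, branch to target");
                    return; // the early return kept inside the Ok(()) arm
                }
                // Backend declined: thread the fallback into the normal call path.
                Err(fallback) => fallback,
            },
        };
        println!("emit ordinary call to {instance:?}");
    }

    fn main() {
        codegen_call_terminator(Some("abort"), Instance(0)); // backend handles it
        codegen_call_terminator(Some("const_allocate"), Instance(7)); // fallback call
    }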

View File

@@ -54,6 +54,7 @@ fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
 }

 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    /// In the `Err` case, returns the instance that should be called instead.
     pub fn codegen_intrinsic_call(
         bx: &mut Bx,
         instance: ty::Instance<'tcx>,

@@ -61,7 +62,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         args: &[OperandRef<'tcx, Bx::Value>],
         llresult: Bx::Value,
         span: Span,
-    ) {
+    ) -> Result<(), ty::Instance<'tcx>> {
         let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

         let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {

@@ -81,7 +82,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let llval = match name {
             sym::abort => {
                 bx.abort();
-                return;
+                return Ok(());
             }

             sym::va_start => bx.va_start(args[0].immediate()),

@@ -150,7 +151,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[0].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::write_bytes => {
                 memset_intrinsic(

@@ -161,7 +162,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
            }

            sym::volatile_copy_nonoverlapping_memory => {

@@ -174,7 +175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::volatile_copy_memory => {
                 copy_intrinsic(

@@ -186,7 +187,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::volatile_set_memory => {
                 memset_intrinsic(

@@ -197,17 +198,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     args[1].immediate(),
                     args[2].immediate(),
                 );
-                return;
+                return Ok(());
             }
             sym::volatile_store => {
                 let dst = args[0].deref(bx.cx());
                 args[1].val.volatile_store(bx, dst);
-                return;
+                return Ok(());
             }
             sym::unaligned_volatile_store => {
                 let dst = args[0].deref(bx.cx());
                 args[1].val.unaligned_volatile_store(bx, dst);
-                return;
+                return Ok(());
             }
             sym::exact_div => {
                 let ty = arg_tys[0];

@@ -225,7 +226,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             name,
                             ty,
                         });
-                        return;
+                        return Ok(());
                     }
                 }
             }

@@ -245,7 +246,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             name,
                             ty: arg_tys[0],
                         });
-                        return;
+                        return Ok(());
                    }
                }
            }

@@ -256,14 +257,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         span,
                         ty: arg_tys[0],
                     });
-                    return;
+                    return Ok(());
                 }
                 let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                     bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                         span,
                         ty: ret_ty,
                     });
-                    return;
+                    return Ok(());
                 };
                 if signed {
                     bx.fptosi(args[0].immediate(), llret_ty)

@@ -280,14 +281,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }

-            sym::const_allocate => {
-                // returns a null pointer at runtime.
-                bx.const_null(bx.type_ptr())
-            }
-
             sym::const_deallocate => {
                 // nop at runtime.
-                return;
+                return Ok(());
             }

             // This requires that atomic intrinsics follow a specific naming pattern:

@@ -350,10 +346,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         bx.store(val, dest.llval, dest.align);
                         let dest = result.project_field(bx, 1);
                         bx.store(success, dest.llval, dest.align);
-                        return;
                     } else {
-                        return invalid_monomorphization(ty);
+                        invalid_monomorphization(ty);
                     }
+                    return Ok(());
                 }

                 "load" => {

@@ -383,7 +379,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         )
                     }
                 } else {
-                    return invalid_monomorphization(ty);
+                    invalid_monomorphization(ty);
+                    return Ok(());
                 }
             }

@@ -399,10 +396,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         val = bx.ptrtoint(val, bx.type_isize());
                     }
                     bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
-                    return;
                 } else {
-                    return invalid_monomorphization(ty);
+                    invalid_monomorphization(ty);
                 }
+                return Ok(());
            }

            "fence" => {

@@ -410,7 +407,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     parse_ordering(bx, ordering),
                     SynchronizationScope::CrossThread,
                 );
-                return;
+                return Ok(());
             }

             "singlethreadfence" => {

@@ -418,7 +415,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     parse_ordering(bx, ordering),
                     SynchronizationScope::SingleThread,
                 );
-                return;
+                return Ok(());
             }

             // These are all AtomicRMW ops

@@ -449,7 +446,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }
                     bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                 } else {
-                    return invalid_monomorphization(ty);
+                    invalid_monomorphization(ty);
+                    return Ok(());
                 }
             }
         }

@@ -458,7 +456,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::nontemporal_store => {
                 let dst = args[0].deref(bx.cx());
                 args[1].val.nontemporal_store(bx, dst);
-                return;
+                return Ok(());
             }

             sym::ptr_guaranteed_cmp => {

@@ -493,8 +491,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             _ => {
                 // Need to use backend-specific things in the implementation.
-                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
-                return;
+                return bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
             }
         };

@@ -507,6 +504,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 .store(bx, result);
             }
         }
+        Ok(())
     }
 }
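The last hunk above is the other half of the plumbing: the platform-independent layer used to call the backend hook and then `return;`, discarding any outcome. It now forwards the backend's `Result` verbatim, so an `Err(instance)` raised deep in the LLVM, GCC, or cranelift layer surfaces unchanged to `codegen_call_terminator`. A tiny sketch of that propagation (toy functions, `u32` standing in for the fallback instance):

    fn backend_specific(handled: bool) -> Result<(), u32> {
        if handled { Ok(()) } else { Err(42) }
    }

    fn generic_layer(handled: bool) -> Result<(), u32> {
        // Before: `backend_specific(...); return;` -- an Err could not happen.
        // After:  `return backend_specific(...);` -- the fallback instance
        // propagates unchanged up to codegen_call_terminator.
        backend_specific(handled)
    }

    fn main() {
        assert_eq!(generic_layer(true), Ok(()));
        assert_eq!(generic_layer(false), Err(42));
    }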

View File

@@ -8,6 +8,8 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
     /// Remember to add all intrinsics here, in `compiler/rustc_hir_analysis/src/check/mod.rs`,
     /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
     /// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
+    /// Returns `Err` if another instance should be called instead. This is used to invoke
+    /// intrinsic default bodies in case an intrinsic is not implemented by the backend.
     fn codegen_intrinsic_call(
         &mut self,
         instance: ty::Instance<'tcx>,

@@ -15,7 +17,7 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
         args: &[OperandRef<'tcx, Self::Value>],
         llresult: Self::Value,
         span: Span,
-    );
+    ) -> Result<(), ty::Instance<'tcx>>;

     fn abort(&mut self);
     fn assume(&mut self, val: Self::Value);
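The new doc comment is the entire contract: a backend may now be partial, implementing what it can and returning `Err` for the rest, instead of being obliged to cover every intrinsic or `bug!`. A toy restatement of that contract (shortened names and types, not the real trait):

    trait IntrinsicCallMethods {
        /// Ok(()) -- the intrinsic was codegen'd here; Err(desc) -- the caller
        /// must emit a call to the named fallback body instead.
        fn codegen_intrinsic_call(&mut self, name: &str) -> Result<(), String>;
    }

    struct MinimalBackend;

    impl IntrinsicCallMethods for MinimalBackend {
        fn codegen_intrinsic_call(&mut self, name: &str) -> Result<(), String> {
            match name {
                "abort" => Ok(()), // the only intrinsic this toy backend implements
                other => Err(format!("fallback body of `{other}`")),
            }
        }
    }

    fn main() {
        let mut backend = MinimalBackend;
        assert!(backend.codegen_intrinsic_call("abort").is_ok());
        assert!(backend.codegen_intrinsic_call("const_allocate").is_err());
    }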

View File

@@ -363,7 +363,7 @@ pub fn check_intrinsic_type(
             ),

             sym::const_allocate => {
-                (0, 0, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
+                (0, 1, vec![tcx.types.usize, tcx.types.usize], Ty::new_mut_ptr(tcx, tcx.types.u8))
             }
             sym::const_deallocate => (
                 0,

View File

@@ -296,9 +296,9 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
         if args.is_empty() {
             return;
         }
-        let ty = args.type_at(0);

-        let known_is_valid = intrinsic_assert_panics(self.tcx, self.param_env, ty, intrinsic_name);
+        let known_is_valid =
+            intrinsic_assert_panics(self.tcx, self.param_env, args[0], intrinsic_name);
         match known_is_valid {
             // We don't know the layout or it's not validity assertion at all, don't touch it
             None => {}

@@ -317,10 +317,11 @@ impl<'tcx> InstSimplifyContext<'tcx, '_> {
 fn intrinsic_assert_panics<'tcx>(
     tcx: TyCtxt<'tcx>,
     param_env: ty::ParamEnv<'tcx>,
-    ty: Ty<'tcx>,
+    arg: ty::GenericArg<'tcx>,
     intrinsic_name: Symbol,
 ) -> Option<bool> {
     let requirement = ValidityRequirement::from_intrinsic(intrinsic_name)?;
+    let ty = arg.expect_ty();
     Some(!tcx.check_validity_requirement((requirement, param_env.and(ty))).ok()?)
 }
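The likely motivation here (inferred from the diff, not stated in it): fallback-body intrinsics are ordinary generic functions, so the first generic argument of an arbitrary intrinsic call is no longer guaranteed to be a type. An eager `args.type_at(0)` could then ICE; deferring `expect_ty()` until after `ValidityRequirement::from_intrinsic` has matched an `assert_*` intrinsic keeps the unwrap confined to intrinsics that do take a type. A toy model of that ordering fix (all names hypothetical):

    #[derive(Clone, Copy)]
    enum GenericArg {
        Ty(&'static str),
        Const(u64), // lifetimes elided from the toy model
    }

    impl GenericArg {
        fn expect_ty(self) -> &'static str {
            match self {
                GenericArg::Ty(t) => t,
                GenericArg::Const(_) => panic!("expected a type"),
            }
        }
    }

    fn validity_requirement(name: &str) -> Option<&'static str> {
        matches!(name, "assert_inhabited" | "assert_zero_valid").then_some("requirement")
    }

    fn intrinsic_assert_panics(arg: GenericArg, name: &str) -> Option<bool> {
        let _requirement = validity_requirement(name)?; // bail for other intrinsics first...
        let _ty = arg.expect_ty(); // ...so this unwrap only runs for assert_* intrinsics
        Some(false)
    }

    fn main() {
        // A non-assert intrinsic whose first generic arg is not a type no longer panics:
        assert!(intrinsic_assert_panics(GenericArg::Const(8), "some_other_intrinsic").is_none());
        assert_eq!(intrinsic_assert_panics(GenericArg::Ty("u8"), "assert_inhabited"), Some(false));
    }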

View File

@@ -956,19 +956,24 @@ fn visit_instance_use<'tcx>(
     if !should_codegen_locally(tcx, &instance) {
         return;
     }

-    // The intrinsics assert_inhabited, assert_zero_valid, and assert_mem_uninitialized_valid will
-    // be lowered in codegen to nothing or a call to panic_nounwind. So if we encounter any
-    // of those intrinsics, we need to include a mono item for panic_nounwind, else we may try to
-    // codegen a call to that function without generating code for the function itself.
     if let ty::InstanceDef::Intrinsic(def_id) = instance.def {
         let name = tcx.item_name(def_id);
         if let Some(_requirement) = ValidityRequirement::from_intrinsic(name) {
+            // The intrinsics assert_inhabited, assert_zero_valid, and assert_mem_uninitialized_valid will
+            // be lowered in codegen to nothing or a call to panic_nounwind. So if we encounter any
+            // of those intrinsics, we need to include a mono item for panic_nounwind, else we may try to
+            // codegen a call to that function without generating code for the function itself.
             let def_id = tcx.lang_items().get(LangItem::PanicNounwind).unwrap();
             let panic_instance = Instance::mono(tcx, def_id);
             if should_codegen_locally(tcx, &panic_instance) {
                 output.push(create_fn_mono_item(tcx, panic_instance, source));
             }
+        } else if tcx.has_attr(def_id, sym::rustc_intrinsic) {
+            // Codegen the fallback body of intrinsics with fallback bodies
+            let instance = ty::Instance::new(def_id, instance.args);
+            if should_codegen_locally(tcx, &instance) {
+                output.push(create_fn_mono_item(tcx, instance, source));
+            }
         }
     }
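The collector change is what keeps the scheme link-safe: if a backend returns `Err` and emits a call to the fallback body, that body must itself have been monomorphized, or the call would target a symbol nobody generated. A toy model of the new rule (`has_rustc_intrinsic_attr` stands in for `tcx.has_attr(def_id, sym::rustc_intrinsic)`):

    use std::collections::HashSet;

    // Whenever an intrinsic carrying #[rustc_intrinsic] is used, enqueue its
    // fallback body as a mono item too, so a backend that declines the
    // intrinsic has an actual function definition to call.
    fn visit_instance_use<'a>(
        name: &'a str,
        has_rustc_intrinsic_attr: bool,
        mono_items: &mut HashSet<&'a str>,
    ) {
        if has_rustc_intrinsic_attr {
            mono_items.insert(name);
        }
    }

    fn main() {
        let mut mono_items = HashSet::new();
        visit_instance_use("const_allocate", true, &mut mono_items);
        assert!(mono_items.contains("const_allocate")); // fallback body gets codegen'd
    }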

View File

@@ -2368,18 +2368,6 @@ extern "rust-intrinsic" {
     #[rustc_nounwind]
     pub fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8;

-    /// Allocates a block of memory at compile time.
-    /// At runtime, just returns a null pointer.
-    ///
-    /// # Safety
-    ///
-    /// - The `align` argument must be a power of two.
-    /// - At compile time, a compile error occurs if this constraint is violated.
-    /// - At runtime, it is not checked.
-    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
-    #[rustc_nounwind]
-    pub fn const_allocate(size: usize, align: usize) -> *mut u8;
-
     /// Deallocates a memory which allocated by `intrinsics::const_allocate` at compile time.
     /// At runtime, does nothing.
     ///

@@ -2594,6 +2582,22 @@ pub(crate) const unsafe fn debug_assertions() -> bool {
     cfg!(debug_assertions)
 }

+/// Allocates a block of memory at compile time.
+/// At runtime, just returns a null pointer.
+///
+/// # Safety
+///
+/// - The `align` argument must be a power of two.
+/// - At compile time, a compile error occurs if this constraint is violated.
+/// - At runtime, it is not checked.
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+#[rustc_nounwind]
+#[cfg_attr(not(bootstrap), rustc_intrinsic)]
+pub const unsafe fn const_allocate(_size: usize, _align: usize) -> *mut u8 {
+    // const eval overrides this function, but runtime code should always just return null pointers.
+    crate::ptr::null_mut()
+}
+
 // Some functions are defined here because they accidentally got made
 // available in this module on stable. See <https://github.com/rust-lang/rust/issues/15702>.
 // (`transmute` also falls into this category, but it cannot be wrapped due to the
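End result for users: `const_allocate` is now an ordinary `const unsafe fn` whose body is the runtime behavior, while const eval still intercepts it. A small usage sketch, assuming a nightly toolchain of roughly this vintage (`core_intrinsics` is perma-unstable):

    #![feature(core_intrinsics)]

    fn main() {
        // At runtime the new fallback body runs: no allocation, just a null pointer.
        let p = unsafe { core::intrinsics::const_allocate(16, 8) };
        assert!(p.is_null());
        // In const eval the interpreter overrides that body and hands out a real
        // compile-time allocation instead (pair it with const_deallocate there).
    }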