Merge pull request #1064 from bjorn3/inline_asm

Basic inline asm support
bjorn3 2020-07-10 22:03:19 +02:00 committed by GitHub
commit c73b9d2e86
8 changed files with 262 additions and 41 deletions

@ -76,5 +76,8 @@ function jit_calc() {
* Good non-rust abi support ([several problems](https://github.com/bjorn3/rustc_codegen_cranelift/issues/10))
* Proc macros
* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041), not coming soon)
* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
  * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
    `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support reg classes. You
    have to specify specific registers instead (see the example just below this list).
* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
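
For reference, a minimal sketch (not part of this PR) of the kind of `asm!` this backend accepts after the change. Every operand names a concrete register; a register class such as `in(reg)` would currently hit the `unimplemented!()` in `expect_reg`, and template placeholders like `{0}` are still `todo!()`:

```rust
// Assumes a 2020-era nightly with the unstable asm feature.
#![feature(asm)]

pub fn add_three(mut x: u64) -> u64 {
    unsafe {
        // Explicit register, Intel syntax (the default for asm!). The backend
        // lowers this to a call into an externally assembled wrapper function.
        asm!("add rdi, 3", inout("rdi") x);
    }
    x
}
```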

@ -36,6 +36,7 @@ pub(crate) fn trans_fn<'tcx, B: Backend + 'static>(
let mut fx = FunctionCx {
tcx,
module: &mut cx.module,
global_asm: &mut cx.global_asm,
pointer_type,
instance,
@ -307,24 +308,26 @@ fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, impl Backend>) {
TerminatorKind::InlineAsm {
template,
operands,
options: _,
options,
destination,
line_spans: _,
} => {
match template {
&[] => {
assert_eq!(operands, &[]);
match *destination {
Some(destination) => {
let destination_block = fx.get_block(destination);
fx.bcx.ins().jump(destination_block, &[]);
}
None => bug!(),
}
crate::inline_asm::codegen_inline_asm(
fx,
bb_data.terminator().source_info.span,
template,
operands,
*options,
);
// Black box
match *destination {
Some(destination) => {
let destination_block = fx.get_block(destination);
fx.bcx.ins().jump(destination_block, &[]);
}
None => {
crate::trap::trap_unreachable(fx, "[corruption] Returned from noreturn inline asm");
}
_ => fx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, "Inline assembly is not supported"),
}
}
TerminatorKind::Resume | TerminatorKind::Abort => {

@ -254,6 +254,7 @@ pub(crate) struct FunctionCx<'clif, 'tcx, B: Backend + 'static> {
// FIXME use a reference to `CodegenCx` instead of `tcx`, `module` and `constants` and `caches`
pub(crate) tcx: TyCtxt<'tcx>,
pub(crate) module: &'clif mut Module<B>,
pub(crate) global_asm: &'clif mut String,
pub(crate) pointer_type: Type, // Cached from module
pub(crate) instance: Instance<'tcx>,

@ -112,21 +112,11 @@ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: rustc_span::Symbol) -> ModuleCodege
let module = new_module(tcx, cgu_name.as_str().to_string());
let mut global_asm = Vec::new();
let mut cx = crate::CodegenCx::new(tcx, module, tcx.sess.opts.debuginfo != DebugInfo::None);
super::codegen_mono_items(&mut cx, &mut global_asm, mono_items);
let (mut module, debug, mut unwind_context) = tcx.sess.time("finalize CodegenCx", || cx.finalize());
super::codegen_mono_items(&mut cx, mono_items);
let (mut module, global_asm, debug, mut unwind_context) = tcx.sess.time("finalize CodegenCx", || cx.finalize());
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut module, &mut unwind_context);
let global_asm = global_asm.into_iter().map(|hir_id| {
let item = tcx.hir().expect_item(hir_id);
if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
asm.as_str().to_string()
} else {
bug!("Expected GlobalAsm found {:?}", item);
}
}).collect::<Vec<String>>().join("\n");
let codegen_result = emit_module(
tcx,
cgu.name().as_str().to_string(),
@ -283,7 +273,7 @@ fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
}
// FIXME fix linker error on macOS
tcx.sess.fatal("global_asm! is not yet supported on macOS and Windows");
tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
}
let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");

@ -54,15 +54,13 @@ pub(super) fn run_jit(tcx: TyCtxt<'_>) -> ! {
let mut cx = crate::CodegenCx::new(tcx, jit_module, false);
let (mut jit_module, _debug, mut unwind_context) = super::time(tcx, "codegen mono items", || {
let mut global_asm = Vec::new();
super::codegen_mono_items(&mut cx, &mut global_asm, mono_items);
for hir_id in global_asm {
let item = tcx.hir().expect_item(hir_id);
tcx.sess.span_err(item.span, "Global asm is not supported in JIT mode");
}
let (mut jit_module, global_asm, _debug, mut unwind_context) = super::time(tcx, "codegen mono items", || {
super::codegen_mono_items(&mut cx, mono_items);
tcx.sess.time("finalize CodegenCx", || cx.finalize())
});
if !global_asm.is_empty() {
tcx.sess.fatal("Global asm is not supported in JIT mode");
}
crate::main_shim::maybe_create_entry_wrapper(tcx, &mut jit_module, &mut unwind_context);
crate::allocator::codegen(tcx, &mut jit_module, &mut unwind_context);

@ -1,6 +1,5 @@
use std::any::Any;
use rustc_hir::HirId;
use rustc_middle::middle::cstore::EncodedMetadata;
use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
@ -32,7 +31,6 @@ pub(crate) fn codegen_crate(
fn codegen_mono_items<'tcx>(
cx: &mut crate::CodegenCx<'tcx, impl Backend + 'static>,
global_asm: &mut Vec<HirId>,
mono_items: Vec<(MonoItem<'tcx>, (RLinkage, Visibility))>,
) {
cx.tcx.sess.time("predefine functions", || {
@ -51,13 +49,12 @@ fn codegen_mono_items<'tcx>(
for (mono_item, (linkage, visibility)) in mono_items {
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
trans_mono_item(cx, global_asm, mono_item, linkage);
trans_mono_item(cx, mono_item, linkage);
}
}
fn trans_mono_item<'tcx, B: Backend + 'static>(
cx: &mut crate::CodegenCx<'tcx, B>,
global_asm: &mut Vec<HirId>,
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
@ -94,7 +91,13 @@ fn trans_mono_item<'tcx, B: Backend + 'static>(
crate::constant::codegen_static(&mut cx.constants_cx, def_id);
}
MonoItem::GlobalAsm(hir_id) => {
global_asm.push(hir_id);
let item = tcx.hir().expect_item(hir_id);
if let rustc_hir::ItemKind::GlobalAsm(rustc_hir::GlobalAsm { asm }) = item.kind {
cx.global_asm.push_str(&*asm.as_str());
cx.global_asm.push_str("\n\n");
} else {
bug!("Expected GlobalAsm found {:?}", item);
}
}
}
}
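
For context, a hedged example (not from this PR) of the `global_asm!` items this arm collects: the raw string is appended verbatim to `cx.global_asm` and later handed to the external assembler, so it is written in the AT&T syntax that `as` expects by default. The symbol name is made up for illustration:

```rust
#![feature(global_asm)]

global_asm!(r#"
.globl double_it
double_it:
    leaq (%rdi,%rdi), %rax
    ret
"#);

extern "C" {
    fn double_it(x: u64) -> u64;
}
```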

src/inline_asm.rs (new file, 220 lines)
@ -0,0 +1,220 @@
//! Codegen of `asm!` invocations. Each inline asm block is turned into a separate
//! assembly wrapper function (emitted as global asm) that exchanges inputs and
//! outputs with the calling code through a stack slot passed as its only argument.

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmTemplatePiece, InlineAsmOptions};
use rustc_middle::mir::InlineAsmOperand;
use rustc_target::asm::*;

pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

    if template.is_empty() {
        // Black box
        return;
    }

    let mut slot_size = Size::from_bytes(0);
    let mut clobbered_regs = Vec::new();
    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

    // Allocate the next slot in the scratch area, aligned to the register size.
    let mut new_slot = |reg_class: InlineAsmRegClass| {
        let reg_size = reg_class
            .supported_types(InlineAsmArch::X86_64)
            .iter()
            .map(|(ty, _)| ty.size())
            .max()
            .unwrap();
        let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
        slot_size = slot_size.align_to(align);
        let offset = slot_size;
        slot_size += reg_size;
        offset
    };

    // FIXME overlap input and output slots to save stack space
    for operand in operands {
        match *operand {
            InlineAsmOperand::In { reg, ref value } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, value).load_scalar(fx)));
            }
            InlineAsmOperand::Out { reg, late: _, place } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                if let Some(place) = place {
                    outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, place)));
                }
            }
            InlineAsmOperand::InOut { reg, late: _, ref in_value, out_place } => {
                let reg = expect_reg(reg);
                clobbered_regs.push((reg, new_slot(reg.reg_class())));
                inputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_operand(fx, in_value).load_scalar(fx)));
                if let Some(out_place) = out_place {
                    outputs.push((reg, new_slot(reg.reg_class()), crate::base::trans_place(fx, out_place)));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    let asm_name = format!("{}__inline_asm_{}", fx.tcx.symbol_name(fx.instance).name, /*FIXME*/0);

    let generated_asm = generate_asm_wrapper(&asm_name, InlineAsmArch::X86_64, options, template, clobbered_regs, &inputs, &outputs);
    fx.global_asm.push_str(&generated_asm);

    call_inline_asm(fx, &asm_name, slot_size, inputs, outputs);
}

// Emits the standalone assembly function: save clobbered registers, load inputs
// from the scratch area, run the template, write outputs back, restore registers.
fn generate_asm_wrapper(
    asm_name: &str,
    arch: InlineAsmArch,
    options: InlineAsmOptions,
    template: &[InlineAsmTemplatePiece],
    clobbered_regs: Vec<(InlineAsmReg, Size)>,
    inputs: &[(InlineAsmReg, Size, Value)],
    outputs: &[(InlineAsmReg, Size, CPlace<'_>)],
) -> String {
    let mut generated_asm = String::new();
    writeln!(generated_asm, ".globl {}", asm_name).unwrap();
    writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
    writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
    writeln!(generated_asm, "{}:", asm_name).unwrap();

    generated_asm.push_str(".intel_syntax noprefix\n");
    generated_asm.push_str(" push rbp\n");
    generated_asm.push_str(" mov rbp,rdi\n");

    // Save clobbered registers
    if !options.contains(InlineAsmOptions::NORETURN) {
        // FIXME skip registers saved by the calling convention
        for &(reg, offset) in &clobbered_regs {
            save_register(&mut generated_asm, arch, reg, offset);
        }
    }

    // Write input registers
    for &(reg, offset, _value) in inputs {
        restore_register(&mut generated_asm, arch, reg, offset);
    }

    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
        generated_asm.push_str(".att_syntax\n");
    }

    // The actual inline asm
    for piece in template {
        match piece {
            InlineAsmTemplatePiece::String(s) => {
                generated_asm.push_str(s);
            }
            InlineAsmTemplatePiece::Placeholder { operand_idx: _, modifier: _, span: _ } => todo!(),
        }
    }
    generated_asm.push('\n');

    if options.contains(InlineAsmOptions::ATT_SYNTAX) {
        generated_asm.push_str(".intel_syntax noprefix\n");
    }

    if !options.contains(InlineAsmOptions::NORETURN) {
        // Read output registers
        for &(reg, offset, _place) in outputs {
            save_register(&mut generated_asm, arch, reg, offset);
        }

        // Restore clobbered registers
        for &(reg, offset) in clobbered_regs.iter().rev() {
            restore_register(&mut generated_asm, arch, reg, offset);
        }

        generated_asm.push_str(" pop rbp\n");
        generated_asm.push_str(" ret\n");
    } else {
        generated_asm.push_str(" ud2\n");
    }

    generated_asm.push_str(".att_syntax\n");
    writeln!(generated_asm, ".size {name}, .-{name}", name=asm_name).unwrap();
    generated_asm.push_str(".text\n");
    generated_asm.push_str("\n\n");

    generated_asm
}

// Emits the Cranelift side: allocate the scratch stack slot, spill the inputs
// into it, call the wrapper with the slot address, and reload the outputs.
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(InlineAsmReg, Size, Value)>,
    outputs: Vec<(InlineAsmReg, Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        offset: None,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    #[cfg(debug_assertions)]
    fx.add_comment(stack_slot, "inline asm scratch slot");

    let inline_asm_func = fx.module.declare_function(asm_name, Linkage::Import, &Signature {
        call_conv: CallConv::SystemV,
        params: vec![AbiParam::new(fx.pointer_type)],
        returns: vec![],
    }).unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    #[cfg(debug_assertions)]
    fx.add_comment(inline_asm_func, asm_name);

    for (_reg, offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (_reg, offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}

fn expect_reg(reg_or_class: InlineAsmRegOrRegClass) -> InlineAsmReg {
    match reg_or_class {
        InlineAsmRegOrRegClass::Reg(reg) => reg,
        InlineAsmRegOrRegClass::RegClass(class) => unimplemented!("{:?}", class),
    }
}

fn save_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
    match arch {
        InlineAsmArch::X86_64 => {
            write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
            generated_asm.push('\n');
        }
        _ => unimplemented!("save_register for {:?}", arch),
    }
}

fn restore_register(generated_asm: &mut String, arch: InlineAsmArch, reg: InlineAsmReg, offset: Size) {
    match arch {
        InlineAsmArch::X86_64 => {
            generated_asm.push_str(" mov ");
            reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
            writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
        }
        _ => unimplemented!("restore_register for {:?}", arch),
    }
}
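
To make the slot layout concrete, here is a hand-written model (not compiler output) of what `call_inline_asm` amounts to for `asm!("add rdi, 3", inout("rdi") x)`. The wrapper name and the three 8-byte slots (saved clobber, input, output) are illustrative; in reality the slots live in a Cranelift stack slot and the wrapper itself is emitted through `fx.global_asm`:

```rust
extern "C" {
    // Hypothetical import: the real symbol is named
    // "<enclosing fn symbol>__inline_asm_0" and is emitted by generate_asm_wrapper.
    fn demo__inline_asm_0(scratch: *mut u64);
}

unsafe fn add_three(mut x: u64) -> u64 {
    // Slot 0: saved clobbered rdi, slot 1: input, slot 2: output (8 bytes each).
    let mut scratch = [0u64; 3];
    scratch[1] = x;                           // stack_store of the input value
    demo__inline_asm_0(scratch.as_mut_ptr()); // SystemV call, slot address in rdi
    x = scratch[2];                           // stack_load of the output value
    x
}
```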

@ -55,6 +55,7 @@ mod constant;
mod debuginfo;
mod discriminant;
mod driver;
mod inline_asm;
mod intrinsics;
mod linkage;
mod main_shim;
@ -123,6 +124,7 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
struct CodegenCx<'tcx, B: Backend + 'static> {
tcx: TyCtxt<'tcx>,
module: Module<B>,
global_asm: String,
constants_cx: ConstantCx,
cached_context: Context,
vtables: FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), DataId>,
@ -148,6 +150,7 @@ impl<'tcx, B: Backend + 'static> CodegenCx<'tcx, B> {
CodegenCx {
tcx,
module,
global_asm: String::new(),
constants_cx: ConstantCx::default(),
cached_context: Context::new(),
vtables: FxHashMap::default(),
@ -156,9 +159,9 @@ impl<'tcx, B: Backend + 'static> CodegenCx<'tcx, B> {
}
}
fn finalize(mut self) -> (Module<B>, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
fn finalize(mut self) -> (Module<B>, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
self.constants_cx.finalize(self.tcx, &mut self.module);
(self.module, self.debug_context, self.unwind_context)
(self.module, self.global_asm, self.debug_context, self.unwind_context)
}
}