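//! Codegen for the `va_arg` intrinsic: loading variadic C-style arguments
//! out of a target-specific `va_list`.
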
use crate::builder::Builder;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::{
    common::IntPredicate,
    traits::{BaseTypeMethods, BuilderMethods, ConstMethods},
};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
use rustc_middle::ty::Ty;
use rustc_target::abi::{Align, Endian, HasDataLayout, Size};
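
/// Rounds `addr` up to the next multiple of `align` via the usual
/// `(addr + align - 1) & -align` bit trick, which is valid because
/// `align` is always a power of two.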
fn round_pointer_up_to_alignment<'ll>(
    bx: &mut Builder<'_, 'll, '_>,
    addr: &'ll Value,
    align: Align,
    ptr_ty: &'ll Type,
) -> &'ll Value {
    let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
    ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
    ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
    bx.inttoptr(ptr_as_int, ptr_ty)
}
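
/// Loads one argument from a `va_list` that is a plain pointer into the
/// argument area, then bumps the list past the slot-aligned argument.
/// Returns the address of the value together with the alignment that
/// address is known to have.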
fn emit_direct_ptr_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    size: Size,
    align: Align,
    slot_size: Align,
    allow_higher_align: bool,
) -> (&'ll Value, Align) {
    let va_list_ty = bx.type_ptr();
    let va_list_addr = list.immediate();

    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

    let (addr, addr_align) = if allow_higher_align && align > slot_size {
        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
    } else {
        (ptr, slot_size)
    };

    let aligned_size = size.align_to(slot_size).bytes() as i32;
    let full_direct_size = bx.cx().const_i32(aligned_size);
    let next = bx.inbounds_ptradd(addr, full_direct_size);
    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
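
    // On big-endian targets a value smaller than its slot is stored at the
    // high (right-aligned) end of the slot, so step the address forward past
    // the padding bytes.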
    if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
        let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
        (adjusted, addr_align)
    } else {
        (addr, addr_align)
    }
}
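
/// Emits a `va_arg` load for targets whose `va_list` is a simple pointer
/// into the argument save area. With `indirect`, the slot holds a pointer
/// to the value rather than the value itself (used, for example, for
/// oversized arguments on Windows x86_64).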
fn emit_ptr_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
    indirect: bool,
    slot_size: Align,
    allow_higher_align: bool,
) -> &'ll Value {
    let layout = bx.cx.layout_of(target_ty);
    let (llty, size, align) = if indirect {
        (
            bx.cx.layout_of(Ty::new_imm_ptr(bx.cx.tcx, target_ty)).llvm_type(bx.cx),
            bx.cx.data_layout().pointer_size,
            bx.cx.data_layout().pointer_align,
        )
    } else {
        (layout.llvm_type(bx.cx), layout.size, layout.align)
    };
    let (addr, addr_align) =
        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
    if indirect {
        let tmp_ret = bx.load(llty, addr, addr_align);
        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
    } else {
        bx.load(llty, addr, addr_align)
    }
}
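
/// Emits a `va_arg` load following the AAPCS64 variadic convention: first
/// try the general-purpose or FP/SIMD register save area, and fall back to
/// the stack once the corresponding save area is exhausted.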
fn emit_aapcs_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    // Implementation of the AAPCS64 calling convention for va_args, see
    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
    //
    // typedef struct va_list {
    //     void * stack; // next stack param
    //     void * gr_top; // end of GP arg reg save area
    //     void * vr_top; // end of FP/SIMD arg reg save area
    //     int gr_offs; // offset from gr_top to next GP register arg
    //     int vr_offs; // offset from vr_top to next FP/SIMD register arg
    // } va_list;
    let va_list_addr = list.immediate();

    // There is no padding between fields since `void*` is size=8 align=8 and `int` is size=4 align=4.
    // See https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
    // Table 1, Byte size and byte alignment of fundamental data types
    // Table 3, Mapping of C & C++ built-in data types
    let ptr_offset = 8;
    let i32_offset = 4;
    let gr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(ptr_offset));
    let vr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * ptr_offset));
    let gr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset));
    let vr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset + i32_offset));

    let layout = bx.cx.layout_of(target_ty);

    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let on_stack = bx.append_sibling_block("va_arg.on_stack");
    let end = bx.append_sibling_block("va_arg.end");

    let zero = bx.const_i32(0);
    let offset_align = Align::from_bytes(4).unwrap();
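
    // Pointers and integers live in the general-purpose register save area
    // (8-byte slots); everything else uses the FP/SIMD save area (16-byte
    // slots).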
    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
    let (reg_off, reg_top, slot_size) = if gr_type {
        let nreg = (layout.size.bytes() + 7) / 8;
        (gr_offs, gr_top, nreg * 8)
    } else {
        let nreg = (layout.size.bytes() + 15) / 16;
        (vr_offs, vr_top, nreg * 16)
    };

    // If the offset is >= 0, the value is on the stack.
    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
    let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
    bx.cond_br(use_stack, on_stack, maybe_reg);

    // The value at this point might be in a register, but there is a chance
    // that it could be on the stack, so we have to update the offset and then
    // check the offset again.

    bx.switch_to_block(maybe_reg);
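    // Arguments with 16-byte alignment start at an even-numbered GP register,
    // so round the (negative) offset up to a multiple of 16 first.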
    if gr_type && layout.align.abi.bytes() > 8 {
        reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
        reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
    }
    let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));

    bx.store(new_reg_off_v, reg_off, offset_align);

    // Check to see if we have overflowed the registers as a result of this.
    // If we have, then we need to use the stack for this value.
    let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
    bx.cond_br(use_stack, on_stack, in_reg);

    bx.switch_to_block(in_reg);
    let top_type = bx.type_ptr();
    let top = bx.load(top_type, reg_top, dl.pointer_align.abi);

    // reg_value = *(@top + reg_off_v);
    let mut reg_addr = bx.ptradd(top, reg_off_v);
    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
        // On big-endian systems the value is right-aligned in its slot.
        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
        reg_addr = bx.ptradd(reg_addr, offset);
    }
    let reg_type = layout.llvm_type(bx);
    let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
    bx.br(end);

    // On Stack block
    bx.switch_to_block(on_stack);
    let stack_value =
        emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
    bx.br(end);

    bx.switch_to_block(end);
    let val =
        bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack]);

    val
}
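
/// Emits a `va_arg` load following the s390x ELF ABI: arguments are fetched
/// from the register save area while argument registers remain, then from
/// the argument overflow area on the stack.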
fn emit_s390x_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    let dl = bx.cx.data_layout();

    // Implementation of the s390x ELF ABI calling convention for va_args, see
    // https://github.com/IBM/s390x-abi (chapter 1.2.4)
    //
    // typedef struct __va_list_tag {
    //     long __gpr;
    //     long __fpr;
    //     void *__overflow_arg_area;
    //     void *__reg_save_area;
    // } va_list[1];
    let va_list_addr = list.immediate();

    // There is no padding between fields since `long` and `void*` both have size=8 align=8.
    // https://github.com/IBM/s390x-abi (Table 1.1.: Scalar types)
    let i64_offset = 8;
    let ptr_offset = 8;
    let gpr = va_list_addr;
    let fpr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(i64_offset));
    let overflow_arg_area = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset));
    let reg_save_area =
        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset + ptr_offset));

    let layout = bx.cx.layout_of(target_ty);

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
    let in_mem = bx.append_sibling_block("va_arg.in_mem");
    let end = bx.append_sibling_block("va_arg.end");

    // FIXME: vector ABI not yet supported.
    let target_ty_size = bx.cx.size_of(target_ty).bytes();
    let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
    let unpadded_size = if indirect { 8 } else { target_ty_size };
    let padded_size = 8;
    let padding = padded_size - unpadded_size;
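
    // Up to five arguments are passed in GPRs (r2..=r6) and up to four in
    // FPRs (f0, f2, f4, f6). The save-area indices below give the byte
    // offsets where each register class is spilled: 2 * 8 for the GPRs and
    // 16 * 8 for the FPRs.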
    let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
    let (max_regs, reg_count, reg_save_index, reg_padding) =
        if gpr_type { (5, gpr, 2, padding) } else { (4, fpr, 16, 0) };

    // Check whether the value was passed in a register or in memory.
    let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
    let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
    bx.cond_br(use_regs, in_reg, in_mem);

    // Emit code to load the value if it was passed in a register.
    bx.switch_to_block(in_reg);

    // Work out the address of the value in the register save area.
    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi);
    let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
    let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
    let reg_addr = bx.ptradd(reg_ptr_v, reg_off);

    // Update the register count.
    let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
    bx.store(new_reg_count_v, reg_count, Align::from_bytes(8).unwrap());
    bx.br(end);

    // Emit code to load the value if it was passed in memory.
    bx.switch_to_block(in_mem);

    // Work out the address of the value in the argument overflow area.
    let arg_ptr_v =
        bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi);
    let arg_off = bx.const_u64(padding);
    let mem_addr = bx.ptradd(arg_ptr_v, arg_off);

    // Update the argument overflow area pointer.
    let arg_size = bx.cx().const_u64(padded_size);
    let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
    bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi);
    bx.br(end);

    // Return the appropriate result.
    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
    let val_addr =
        if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr };
    bx.load(val_type, val_addr, layout.align.abi)
}
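
/// Entry point: emits code to read the next variadic argument of type
/// `target_ty` out of the `va_list` in `addr`, dispatching on the target's
/// `va_list` ABI.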
pub(super) fn emit_va_arg<'ll, 'tcx>(
    bx: &mut Builder<'_, 'll, 'tcx>,
    addr: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    // Determine the va_arg implementation to use. The LLVM va_arg instruction
    // is lacking in some instances, so we should only use it as a fallback.
    let target = &bx.cx.tcx.sess.target;
    let arch = &bx.cx.tcx.sess.target.arch;
    match &**arch {
        // Windows x86
        "x86" if target.is_like_windows => {
            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
        }
        // Generic x86
        "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
        // Windows AArch64
"aarch64" | "arm64ec" if target.is_like_windows => {
|
2018-12-05 02:44:08 +00:00
|
|
|
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
|
|
|
|
}
|
2020-10-20 01:48:58 +00:00
|
|
|
// macOS / iOS AArch64
|
2020-11-08 11:27:51 +00:00
|
|
|
"aarch64" if target.is_like_osx => {
|
2018-12-05 02:44:08 +00:00
|
|
|
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
|
|
|
|
}
|
2020-10-24 13:44:57 +00:00
|
|
|
"aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
|
2022-12-06 16:36:09 +00:00
|
|
|
"s390x" => emit_s390x_va_arg(bx, addr, target_ty),
|
2018-12-05 02:44:08 +00:00
|
|
|
// Windows x86_64
|
2020-11-08 11:27:51 +00:00
|
|
|
"x86_64" if target.is_like_windows => {
|
2018-10-23 23:13:33 +00:00
|
|
|
let target_ty_size = bx.cx.size_of(target_ty).bytes();
|
2020-03-03 01:07:15 +00:00
|
|
|
let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
|
2018-10-23 23:13:33 +00:00
|
|
|
emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
|
|
|
|
}
|
2018-12-05 02:44:08 +00:00
|
|
|
// For all other architecture/OS combinations fall back to using
|
|
|
|
// the LLVM va_arg instruction.
|
|
|
|
// https://llvm.org/docs/LangRef.html#va-arg-instruction
|
2019-03-25 21:28:03 +00:00
|
|
|
_ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
|
2018-10-23 23:13:33 +00:00
|
|
|
}
|
|
|
|
}
|