// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// FIXME: Rename 'DIGlobalVariable' to 'DIGlobalVariableExpression'
// once support for LLVM 3.9 is dropped.
//
// This method was changed in this LLVM patch:
// https://reviews.llvm.org/D26769

use super::debuginfo::{
    DIBuilderRef, DIDescriptor_opaque, DIDescriptor, DIFile, DILexicalBlock, DISubprogram,
    DIType_opaque, DIType, DIBasicType, DIDerivedType, DICompositeType, DIScope_opaque, DIScope,
    DIVariable, DIGlobalVariable, DIArray_opaque, DIArray, DISubrange, DITemplateTypeParameter,
    DIEnumerator, DINameSpace, DIFlags,
};

use libc::{c_uint, c_int, size_t, c_char};
use libc::{c_longlong, c_ulonglong, c_void};

use std::ptr::NonNull;

use super::RustStringRef;

pub type Opcode = u32;
pub type Bool = c_uint;

pub const True: Bool = 1 as Bool;
pub const False: Bool = 0 as Bool;

#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum LLVMRustResult {
    Success,
    Failure,
}

// Consts for the LLVM CallConv type, pre-cast to usize.

/// LLVM CallingConv::ID. Should we wrap this?
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum CallConv {
    CCallConv = 0,
    FastCallConv = 8,
    ColdCallConv = 9,
    X86StdcallCallConv = 64,
    X86FastcallCallConv = 65,
    ArmAapcsCallConv = 67,
    Msp430Intr = 69,
    X86_ThisCall = 70,
    PtxKernel = 71,
    X86_64_SysV = 78,
    X86_64_Win64 = 79,
    X86_VectorCall = 80,
    X86_Intr = 83,
    AmdGpuKernel = 91,
}
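
// Illustrative sketch (added for clarity, not in the original source): these discriminants are
// the raw LLVM CallingConv::ID values, so they are handed to LLVM as a plain c_uint, e.g.
//
//     unsafe { LLVMSetFunctionCallConv(llfn, CallConv::FastCallConv as c_uint) }
//
// where `llfn` is assumed to be a `ValueRef` naming a function in the current module.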

/// LLVMRustLinkage
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[repr(C)]
pub enum Linkage {
    ExternalLinkage = 0,
    AvailableExternallyLinkage = 1,
    LinkOnceAnyLinkage = 2,
    LinkOnceODRLinkage = 3,
    WeakAnyLinkage = 4,
    WeakODRLinkage = 5,
    AppendingLinkage = 6,
    InternalLinkage = 7,
    PrivateLinkage = 8,
    ExternalWeakLinkage = 9,
    CommonLinkage = 10,
}

/// LLVMRustVisibility
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[repr(C)]
pub enum Visibility {
    Default = 0,
    Hidden = 1,
    Protected = 2,
}

/// LLVMDiagnosticSeverity
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub enum DiagnosticSeverity {
    Error = 0,
    Warning = 1,
    Remark = 2,
    Note = 3,
}

/// LLVMDLLStorageClass
#[derive(Copy, Clone)]
#[repr(C)]
pub enum DLLStorageClass {
    Default = 0,
    DllImport = 1, // Function to be imported from DLL.
    DllExport = 2, // Function to be accessible from DLL.
}

/// Matches LLVMRustAttribute in rustllvm.h
/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
/// though it is not ABI compatible (since it's a C++ enum).
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum Attribute {
    AlwaysInline = 0,
    ByVal = 1,
    Cold = 2,
    InlineHint = 3,
    MinSize = 4,
    Naked = 5,
    NoAlias = 6,
    NoCapture = 7,
    NoInline = 8,
    NonNull = 9,
    NoRedZone = 10,
    NoReturn = 11,
    NoUnwind = 12,
    OptimizeForSize = 13,
    ReadOnly = 14,
    SExt = 15,
    StructRet = 16,
    UWTable = 17,
    ZExt = 18,
    InReg = 19,
    SanitizeThread = 20,
    SanitizeAddress = 21,
    SanitizeMemory = 22,
}
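
// Illustrative sketch (added, not in the original source): these variants are passed straight
// to the LLVMRust* attribute helpers declared further down in this file, e.g.
//
//     unsafe { LLVMRustAddFunctionAttribute(llfn, index, Attribute::NoInline) }
//
// where `llfn` and `index` are assumed to be a function ValueRef and an attribute index.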

/// LLVMIntPredicate
#[derive(Copy, Clone)]
#[repr(C)]
pub enum IntPredicate {
    IntEQ = 32,
    IntNE = 33,
    IntUGT = 34,
    IntUGE = 35,
    IntULT = 36,
    IntULE = 37,
    IntSGT = 38,
    IntSGE = 39,
    IntSLT = 40,
    IntSLE = 41,
}

/// LLVMRealPredicate
#[derive(Copy, Clone)]
#[repr(C)]
pub enum RealPredicate {
    RealPredicateFalse = 0,
    RealOEQ = 1,
    RealOGT = 2,
    RealOGE = 3,
    RealOLT = 4,
    RealOLE = 5,
    RealONE = 6,
    RealORD = 7,
    RealUNO = 8,
    RealUEQ = 9,
    RealUGT = 10,
    RealUGE = 11,
    RealULT = 12,
    RealULE = 13,
    RealUNE = 14,
    RealPredicateTrue = 15,
}

/// LLVMTypeKind
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum TypeKind {
    Void = 0,
    Half = 1,
    Float = 2,
    Double = 3,
    X86_FP80 = 4,
    FP128 = 5,
    PPC_FP128 = 6,
    Label = 7,
    Integer = 8,
    Function = 9,
    Struct = 10,
    Array = 11,
    Pointer = 12,
    Vector = 13,
    Metadata = 14,
    X86_MMX = 15,
    Token = 16,
}

/// LLVMAtomicRmwBinOp
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicRmwBinOp {
    AtomicXchg = 0,
    AtomicAdd = 1,
    AtomicSub = 2,
    AtomicAnd = 3,
    AtomicNand = 4,
    AtomicOr = 5,
    AtomicXor = 6,
    AtomicMax = 7,
    AtomicMin = 8,
    AtomicUMax = 9,
    AtomicUMin = 10,
}

/// LLVMAtomicOrdering
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AtomicOrdering {
    NotAtomic = 0,
    Unordered = 1,
    Monotonic = 2,
    // Consume = 3, // Not specified yet.
    Acquire = 4,
    Release = 5,
    AcquireRelease = 6,
    SequentiallyConsistent = 7,
}

/// LLVMRustSynchronizationScope
#[derive(Copy, Clone)]
#[repr(C)]
pub enum SynchronizationScope {
    Other,
    SingleThread,
    CrossThread,
}

/// LLVMRustFileType
#[derive(Copy, Clone)]
#[repr(C)]
pub enum FileType {
    Other,
    AssemblyFile,
    ObjectFile,
}

/// LLVMMetadataType
#[derive(Copy, Clone)]
#[repr(C)]
pub enum MetadataType {
    MD_dbg = 0,
    MD_tbaa = 1,
    MD_prof = 2,
    MD_fpmath = 3,
    MD_range = 4,
    MD_tbaa_struct = 5,
    MD_invariant_load = 6,
    MD_alias_scope = 7,
    MD_noalias = 8,
    MD_nontemporal = 9,
    MD_mem_parallel_loop_access = 10,
    MD_nonnull = 11,
}

/// LLVMRustAsmDialect
#[derive(Copy, Clone)]
#[repr(C)]
pub enum AsmDialect {
    Other,
    Att,
    Intel,
}

/// LLVMRustCodeGenOptLevel
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptLevel {
    Other,
    None,
    Less,
    Default,
    Aggressive,
}

/// LLVMRelocMode
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum RelocMode {
    Default,
    Static,
    PIC,
    DynamicNoPic,
    ROPI,
    RWPI,
    ROPI_RWPI,
}

/// LLVMRustCodeModel
#[derive(Copy, Clone)]
#[repr(C)]
pub enum CodeModel {
    Other,
    Small,
    Kernel,
    Medium,
    Large,
    None,
}

/// LLVMRustDiagnosticKind
#[derive(Copy, Clone)]
#[repr(C)]
pub enum DiagnosticKind {
    Other,
    InlineAsm,
    StackSize,
    DebugMetadataVersion,
    SampleProfile,
    OptimizationRemark,
    OptimizationRemarkMissed,
    OptimizationRemarkAnalysis,
    OptimizationRemarkAnalysisFPCommute,
    OptimizationRemarkAnalysisAliasing,
    OptimizationRemarkOther,
    OptimizationFailure,
    PGOProfile,
}

/// LLVMRustArchiveKind
#[derive(Copy, Clone)]
#[repr(C)]
pub enum ArchiveKind {
    Other,
    K_GNU,
    K_BSD,
    K_COFF,
}

/// LLVMRustPassKind
#[derive(Copy, Clone, PartialEq, Debug)]
#[repr(C)]
pub enum PassKind {
    Other,
    Function,
    Module,
}

/// LLVMRustThinLTOData
extern { pub type ThinLTOData; }

/// LLVMRustThinLTOBuffer
extern { pub type ThinLTOBuffer; }

/// LLVMRustThinLTOModule
#[repr(C)]
pub struct ThinLTOModule {
    pub identifier: *const c_char,
    pub data: *const u8,
    pub len: usize,
}
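
// Note (added for clarity, not in the original): `ThinLTOModule` is `#[repr(C)]` because it is
// assumed to mirror the corresponding struct on the C++ side of rustllvm, so its field order
// and types must stay in sync with that definition rather than change independently.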

/// LLVMThreadLocalMode
#[derive(Copy, Clone)]
#[repr(C)]
pub enum ThreadLocalMode {
    NotThreadLocal,
    GeneralDynamic,
    LocalDynamic,
    InitialExec,
    LocalExec,
}

// Opaque pointer types
extern { pub type Module; }
extern { pub type Context; }
extern { pub type Type_opaque; }
pub type TypeRef = *mut Type_opaque;
extern { pub type Value_opaque; }
pub type ValueRef = *mut Value_opaque;
extern { pub type Metadata_opaque; }
pub type MetadataRef = *mut Metadata_opaque;
extern { pub type BasicBlock_opaque; }
pub type BasicBlockRef = *mut BasicBlock_opaque;
extern { pub type Builder_opaque; }
pub type BuilderRef = *mut Builder_opaque;
extern { pub type ExecutionEngine_opaque; }
pub type ExecutionEngineRef = *mut ExecutionEngine_opaque;
extern { pub type MemoryBuffer_opaque; }
pub type MemoryBufferRef = *mut MemoryBuffer_opaque;
extern { pub type PassManager_opaque; }
pub type PassManagerRef = *mut PassManager_opaque;
extern { pub type PassManagerBuilder_opaque; }
pub type PassManagerBuilderRef = *mut PassManagerBuilder_opaque;
extern { pub type Use_opaque; }
pub type UseRef = *mut Use_opaque;
extern { pub type TargetData_opaque; }
pub type TargetDataRef = *mut TargetData_opaque;
extern { pub type ObjectFile_opaque; }
pub type ObjectFileRef = *mut ObjectFile_opaque;
extern { pub type SectionIterator_opaque; }
pub type SectionIteratorRef = *mut SectionIterator_opaque;
extern { pub type Pass_opaque; }
pub type PassRef = *mut Pass_opaque;
extern { pub type TargetMachine; }
pub type TargetMachineRef = *const TargetMachine;
extern { pub type Archive_opaque; }
pub type ArchiveRef = *mut Archive_opaque;
extern { pub type ArchiveIterator_opaque; }
pub type ArchiveIteratorRef = *mut ArchiveIterator_opaque;
extern { pub type ArchiveChild_opaque; }
pub type ArchiveChildRef = *mut ArchiveChild_opaque;
extern { pub type Twine_opaque; }
pub type TwineRef = *mut Twine_opaque;
extern { pub type DiagnosticInfo_opaque; }
pub type DiagnosticInfoRef = *mut DiagnosticInfo_opaque;
extern { pub type DebugLoc_opaque; }
pub type DebugLocRef = *mut DebugLoc_opaque;
extern { pub type SMDiagnostic_opaque; }
pub type SMDiagnosticRef = *mut SMDiagnostic_opaque;
extern { pub type RustArchiveMember_opaque; }
pub type RustArchiveMemberRef = *mut RustArchiveMember_opaque;
extern { pub type OperandBundleDef_opaque; }
pub type OperandBundleDefRef = *mut OperandBundleDef_opaque;
extern { pub type Linker_opaque; }
pub type LinkerRef = *mut Linker_opaque;

pub type DiagnosticHandler = unsafe extern "C" fn(DiagnosticInfoRef, *mut c_void);
pub type InlineAsmDiagHandler = unsafe extern "C" fn(SMDiagnosticRef, *const c_void, c_uint);
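
// Note (added for clarity, not part of the original): each `extern { pub type Foo_opaque; }`
// above declares a C-style incomplete type, and the matching `*mut Foo_opaque` alias is the raw
// handle that the `extern "C"` declarations below pass across the FFI boundary.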

pub mod debuginfo {
    use super::Metadata_opaque;

    extern { pub type DIBuilder_opaque; }
    pub type DIBuilderRef = *mut DIBuilder_opaque;

    pub type DIDescriptor_opaque = Metadata_opaque;
    pub type DIDescriptor = *mut DIDescriptor_opaque;
    pub type DIScope_opaque = DIDescriptor_opaque;
    pub type DIScope = *mut DIScope_opaque;
    pub type DILocation = DIDescriptor;
    pub type DIFile = DIScope;
    pub type DILexicalBlock = DIScope;
    pub type DISubprogram = DIScope;
    pub type DINameSpace = DIScope;
    pub type DIType_opaque = DIDescriptor_opaque;
    pub type DIType = *mut DIType_opaque;
    pub type DIBasicType = DIType;
    pub type DIDerivedType = DIType;
    pub type DICompositeType = DIDerivedType;
    pub type DIVariable = DIDescriptor;
    pub type DIGlobalVariable = DIDescriptor;
    pub type DIArray_opaque = DIDescriptor_opaque;
    pub type DIArray = *mut DIArray_opaque;
    pub type DISubrange = DIDescriptor;
    pub type DIEnumerator = DIDescriptor;
    pub type DITemplateTypeParameter = DIDescriptor;

    // These values **must** match with LLVMRustDIFlags!!
    bitflags! {
        #[repr(C)]
        #[derive(Default)]
        pub struct DIFlags: ::libc::uint32_t {
            const FlagZero              = 0;
            const FlagPrivate           = 1;
            const FlagProtected         = 2;
            const FlagPublic            = 3;
            const FlagFwdDecl           = (1 << 2);
            const FlagAppleBlock        = (1 << 3);
            const FlagBlockByrefStruct  = (1 << 4);
            const FlagVirtual           = (1 << 5);
            const FlagArtificial        = (1 << 6);
            const FlagExplicit          = (1 << 7);
            const FlagPrototyped        = (1 << 8);
            const FlagObjcClassComplete = (1 << 9);
            const FlagObjectPointer     = (1 << 10);
            const FlagVector            = (1 << 11);
            const FlagStaticMember      = (1 << 12);
            const FlagLValueReference   = (1 << 13);
            const FlagRValueReference   = (1 << 14);
            const FlagExternalTypeRef   = (1 << 15);
            const FlagIntroducedVirtual = (1 << 18);
            const FlagBitField          = (1 << 19);
            const FlagNoReturn          = (1 << 20);
            const FlagMainSubprogram    = (1 << 21);
        }
    }
}
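
// Illustrative sketch (added, not part of the original file): since `DIFlags` is a bitflags
// type, individual flags combine with `|` before being handed to the DIBuilder entry points:
//
//     let flags = debuginfo::DIFlags::FlagPrototyped | debuginfo::DIFlags::FlagArtificial;
//
// The numeric values are assumed to mirror the C++ `LLVMRustDIFlags` definition noted above.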

extern { pub type ModuleBuffer; }

#[allow(improper_ctypes)] // TODO: remove this (needed because of the NonNull uses below)
extern "C" {
    // Create and destroy contexts.
    pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
    pub fn LLVMContextDispose(C: &'static mut Context);
    pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
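
    // Illustrative sketch (added, not from the original source): a context created through
    // these bindings is torn down explicitly once its modules are no longer needed, e.g.
    //
    //     let cx = unsafe { LLVMRustContextCreate(/* shouldDiscardNames */ false) };
    //     // ... build modules inside `cx` ...
    //     unsafe { LLVMContextDispose(cx) };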

    // Create modules.
    pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
    pub fn LLVMGetModuleContext(M: &Module) -> &Context;
    pub fn LLVMCloneModule(M: &Module) -> &Module;

    /// Data layout. See Module::getDataLayout.
    pub fn LLVMGetDataLayout(M: &Module) -> *const c_char;
    pub fn LLVMSetDataLayout(M: &Module, Triple: *const c_char);

    /// See Module::dump.
    pub fn LLVMDumpModule(M: &Module);

    /// See Module::setModuleInlineAsm.
    pub fn LLVMSetModuleInlineAsm(M: &Module, Asm: *const c_char);
    pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char);

    /// See llvm::LLVMTypeKind::getTypeID.
    pub fn LLVMRustGetTypeKind(Ty: TypeRef) -> TypeKind;

    // Operations on integer types
    pub fn LLVMInt1TypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMInt8TypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMInt16TypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMInt32TypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMInt64TypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> TypeRef;

    pub fn LLVMGetIntTypeWidth(IntegerTy: TypeRef) -> c_uint;

    // Operations on real types
    pub fn LLVMFloatTypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMDoubleTypeInContext(C: &Context) -> TypeRef;

    // Operations on function types
    pub fn LLVMFunctionType(
        ReturnType: TypeRef,
        ParamTypes: *const TypeRef,
        ParamCount: c_uint,
        IsVarArg: Bool,
    ) -> TypeRef;
    pub fn LLVMGetReturnType(FunctionTy: TypeRef) -> TypeRef;
    pub fn LLVMCountParamTypes(FunctionTy: TypeRef) -> c_uint;
    pub fn LLVMGetParamTypes(FunctionTy: TypeRef, Dest: *mut TypeRef);

    // Operations on struct types
    pub fn LLVMStructTypeInContext(
        C: &Context,
        ElementTypes: *const TypeRef,
        ElementCount: c_uint,
        Packed: Bool,
    ) -> TypeRef;
    pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool;

    // Operations on array, pointer, and vector types (sequence types)
    pub fn LLVMRustArrayType(ElementType: TypeRef, ElementCount: u64) -> TypeRef;
    pub fn LLVMPointerType(ElementType: TypeRef, AddressSpace: c_uint) -> TypeRef;
    pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) -> TypeRef;

    pub fn LLVMGetElementType(Ty: TypeRef) -> TypeRef;
    pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint;

    // Operations on other types
    pub fn LLVMVoidTypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMX86MMXTypeInContext(C: &Context) -> TypeRef;
    pub fn LLVMRustMetadataTypeInContext(C: &Context) -> TypeRef;

    // Operations on all values
    pub fn LLVMTypeOf(Val: ValueRef) -> TypeRef;
    pub fn LLVMGetValueName(Val: ValueRef) -> *const c_char;
    pub fn LLVMSetValueName(Val: ValueRef, Name: *const c_char);
    pub fn LLVMReplaceAllUsesWith(OldVal: ValueRef, NewVal: ValueRef);
    pub fn LLVMSetMetadata(Val: ValueRef, KindID: c_uint, Node: ValueRef);

    // Operations on Uses
    pub fn LLVMGetFirstUse(Val: ValueRef) -> UseRef;
    pub fn LLVMGetNextUse(U: UseRef) -> UseRef;
    pub fn LLVMGetUser(U: UseRef) -> ValueRef;

    // Operations on Users
    pub fn LLVMGetOperand(Val: ValueRef, Index: c_uint) -> ValueRef;

    // Operations on constants of any type
    pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef;
    pub fn LLVMConstICmp(Pred: IntPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef;
    pub fn LLVMConstFCmp(Pred: RealPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef;
    pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef;

    // Operations on metadata
    pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> ValueRef;
    pub fn LLVMMDNodeInContext(C: &Context, Vals: *const ValueRef, Count: c_uint) -> ValueRef;
    pub fn LLVMAddNamedMetadataOperand(M: &Module, Name: *const c_char, Val: ValueRef);

    // Operations on scalar constants
    pub fn LLVMConstInt(IntTy: TypeRef, N: c_ulonglong, SignExtend: Bool) -> ValueRef;
    pub fn LLVMConstIntOfArbitraryPrecision(IntTy: TypeRef, Wn: c_uint, Ws: *const u64)
        -> ValueRef;
    pub fn LLVMConstIntGetZExtValue(ConstantVal: ValueRef) -> c_ulonglong;
    pub fn LLVMConstIntGetSExtValue(ConstantVal: ValueRef) -> c_longlong;
    pub fn LLVMRustConstInt128Get(ConstantVal: ValueRef, SExt: bool, high: *mut u64, low: *mut u64)
        -> bool;
    pub fn LLVMConstRealGetDouble(ConstantVal: ValueRef, losesInfo: *mut Bool) -> f64;
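
    // Illustrative sketch (added; `cx` is assumed to be a &Context obtained from the bindings
    // above):
    //
    //     let int32_ty = unsafe { LLVMInt32TypeInContext(cx) };
    //     let forty_two = unsafe { LLVMConstInt(int32_ty, 42, False) };
    //     assert_eq!(unsafe { LLVMConstIntGetZExtValue(forty_two) }, 42);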

    // Operations on composite constants
    pub fn LLVMConstStringInContext(
        C: &Context,
        Str: *const c_char,
        Length: c_uint,
        DontNullTerminate: Bool,
    ) -> ValueRef;
    pub fn LLVMConstStructInContext(
        C: &Context,
        ConstantVals: *const ValueRef,
        Count: c_uint,
        Packed: Bool,
    ) -> ValueRef;

    pub fn LLVMConstArray(ElementTy: TypeRef, ConstantVals: *const ValueRef, Length: c_uint)
        -> ValueRef;
    pub fn LLVMConstVector(ScalarConstantVals: *const ValueRef, Size: c_uint) -> ValueRef;

    // Constant expressions
    pub fn LLVMSizeOf(Ty: TypeRef) -> ValueRef;
    pub fn LLVMConstNeg(ConstantVal: ValueRef) -> ValueRef;
    pub fn LLVMConstFNeg(ConstantVal: ValueRef) -> ValueRef;
    pub fn LLVMConstNot(ConstantVal: ValueRef) -> ValueRef;
    pub fn LLVMConstAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstFAdd(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstSub(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstFSub(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstMul(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstFMul(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstUDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstSDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstFDiv(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstURem(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstSRem(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstFRem(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstAnd(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstOr(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstXor(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstShl(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstLShr(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstAShr(LHSConstant: ValueRef, RHSConstant: ValueRef) -> ValueRef;
    pub fn LLVMConstGEP(
        ConstantVal: ValueRef,
        ConstantIndices: *const ValueRef,
        NumIndices: c_uint,
    ) -> ValueRef;
    pub fn LLVMConstInBoundsGEP(
        ConstantVal: ValueRef,
        ConstantIndices: *const ValueRef,
        NumIndices: c_uint,
    ) -> ValueRef;
    pub fn LLVMConstTrunc(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstZExt(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstUIToFP(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstSIToFP(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstFPToUI(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstFPToSI(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstPtrToInt(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstIntToPtr(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstBitCast(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstPointerCast(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstIntCast(ConstantVal: ValueRef, ToType: TypeRef, isSigned: Bool) -> ValueRef;
    pub fn LLVMConstFPCast(ConstantVal: ValueRef, ToType: TypeRef) -> ValueRef;
    pub fn LLVMConstExtractValue(AggConstant: ValueRef, IdxList: *const c_uint, NumIdx: c_uint)
        -> ValueRef;
    pub fn LLVMConstInlineAsm(
        Ty: TypeRef,
        AsmString: *const c_char,
        Constraints: *const c_char,
        HasSideEffects: Bool,
        IsAlignStack: Bool,
    ) -> ValueRef;

    // Operations on global variables, functions, and aliases (globals)
    pub fn LLVMIsDeclaration(Global: ValueRef) -> Bool;
    pub fn LLVMRustGetLinkage(Global: ValueRef) -> Linkage;
    pub fn LLVMRustSetLinkage(Global: ValueRef, RustLinkage: Linkage);
    pub fn LLVMGetSection(Global: ValueRef) -> *const c_char;
    pub fn LLVMSetSection(Global: ValueRef, Section: *const c_char);
    pub fn LLVMRustGetVisibility(Global: ValueRef) -> Visibility;
    pub fn LLVMRustSetVisibility(Global: ValueRef, Viz: Visibility);
    pub fn LLVMGetAlignment(Global: ValueRef) -> c_uint;
    pub fn LLVMSetAlignment(Global: ValueRef, Bytes: c_uint);
    pub fn LLVMSetDLLStorageClass(V: ValueRef, C: DLLStorageClass);

    // Operations on global variables
    pub fn LLVMIsAGlobalVariable(GlobalVar: ValueRef) -> ValueRef;
    pub fn LLVMAddGlobal(M: &Module, Ty: TypeRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> ValueRef;
    pub fn LLVMRustGetOrInsertGlobal(M: &Module, Name: *const c_char, T: TypeRef) -> ValueRef;
    pub fn LLVMGetFirstGlobal(M: &Module) -> ValueRef;
    pub fn LLVMGetNextGlobal(GlobalVar: ValueRef) -> ValueRef;
    pub fn LLVMDeleteGlobal(GlobalVar: ValueRef);
    pub fn LLVMGetInitializer(GlobalVar: ValueRef) -> ValueRef;
    pub fn LLVMSetInitializer(GlobalVar: ValueRef, ConstantVal: ValueRef);
    pub fn LLVMSetThreadLocal(GlobalVar: ValueRef, IsThreadLocal: Bool);
    pub fn LLVMSetThreadLocalMode(GlobalVar: ValueRef, Mode: ThreadLocalMode);
    pub fn LLVMIsGlobalConstant(GlobalVar: ValueRef) -> Bool;
    pub fn LLVMSetGlobalConstant(GlobalVar: ValueRef, IsConstant: Bool);
    pub fn LLVMRustGetNamedValue(M: &Module, Name: *const c_char) -> ValueRef;
    pub fn LLVMSetTailCall(CallInst: ValueRef, IsTailCall: Bool);
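
    // Illustrative sketch (added; `module`, `int32_ty`, and `forty_two` are assumed to be a
    // &Module, a TypeRef, and a constant ValueRef obtained from the bindings above):
    //
    //     let name = std::ffi::CString::new("answer").unwrap();
    //     let g = unsafe { LLVMAddGlobal(module, int32_ty, name.as_ptr()) };
    //     unsafe { LLVMSetInitializer(g, forty_two) };
    //     unsafe { LLVMSetGlobalConstant(g, True) };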

    // Operations on functions
    pub fn LLVMAddFunction(M: &Module, Name: *const c_char, FunctionTy: TypeRef) -> ValueRef;
    pub fn LLVMGetNamedFunction(M: &Module, Name: *const c_char) -> ValueRef;
    pub fn LLVMGetFirstFunction(M: &Module) -> ValueRef;
    pub fn LLVMGetNextFunction(Fn: ValueRef) -> ValueRef;
    pub fn LLVMRustGetOrInsertFunction(M: &Module, Name: *const c_char, FunctionTy: TypeRef)
        -> ValueRef;
    pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint);
    pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32);
    pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64);
    pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64);
    pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute);
    pub fn LLVMRustAddFunctionAttrStringValue(
        Fn: ValueRef,
        index: c_uint,
        Name: *const c_char,
        Value: *const c_char,
    );
    pub fn LLVMRustRemoveFunctionAttributes(Fn: ValueRef, index: c_uint, attr: Attribute);

    // Operations on parameters
    pub fn LLVMCountParams(Fn: ValueRef) -> c_uint;
    pub fn LLVMGetParam(Fn: ValueRef, Index: c_uint) -> ValueRef;

    // Operations on basic blocks
    pub fn LLVMBasicBlockAsValue(BB: BasicBlockRef) -> ValueRef;
    pub fn LLVMGetBasicBlockParent(BB: BasicBlockRef) -> ValueRef;
    pub fn LLVMAppendBasicBlockInContext(C: &Context, Fn: ValueRef, Name: *const c_char)
        -> BasicBlockRef;
    pub fn LLVMDeleteBasicBlock(BB: BasicBlockRef);

    // Operations on instructions
    pub fn LLVMGetInstructionParent(Inst: ValueRef) -> BasicBlockRef;
    pub fn LLVMGetFirstBasicBlock(Fn: ValueRef) -> BasicBlockRef;
    pub fn LLVMGetFirstInstruction(BB: BasicBlockRef) -> ValueRef;
    pub fn LLVMInstructionEraseFromParent(Inst: ValueRef);

    // Operations on call sites
    pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint);
    pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute);
    pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32);
    pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64);
    pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(
        Instr: ValueRef,
        index: c_uint,
        bytes: u64,
    );

    // Operations on load/store instructions (only)
    pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool);

    // Operations on phi nodes
    pub fn LLVMAddIncoming(
        PhiNode: ValueRef,
        IncomingValues: *const ValueRef,
        IncomingBlocks: *const BasicBlockRef,
        Count: c_uint,
    );

    // Instruction builders
    pub fn LLVMCreateBuilderInContext(C: &Context) -> BuilderRef;
    pub fn LLVMPositionBuilder(Builder: BuilderRef, Block: BasicBlockRef, Instr: ValueRef);
    pub fn LLVMPositionBuilderBefore(Builder: BuilderRef, Instr: ValueRef);
    pub fn LLVMPositionBuilderAtEnd(Builder: BuilderRef, Block: BasicBlockRef);
    pub fn LLVMGetInsertBlock(Builder: BuilderRef) -> BasicBlockRef;
    pub fn LLVMDisposeBuilder(Builder: BuilderRef);
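
    // Illustrative sketch (added; `cx` and `entry_bb` are assumed to be a &Context and a
    // BasicBlockRef created via the bindings above):
    //
    //     let builder = unsafe { LLVMCreateBuilderInContext(cx) };
    //     unsafe { LLVMPositionBuilderAtEnd(builder, entry_bb) };
    //     unsafe { LLVMBuildRetVoid(builder) };
    //     unsafe { LLVMDisposeBuilder(builder) };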

    // Metadata
    pub fn LLVMSetCurrentDebugLocation(Builder: BuilderRef, L: Option<NonNull<Value_opaque>>);
    pub fn LLVMGetCurrentDebugLocation(Builder: BuilderRef) -> ValueRef;
    pub fn LLVMSetInstDebugLocation(Builder: BuilderRef, Inst: ValueRef);

    // Terminators
    pub fn LLVMBuildRetVoid(B: BuilderRef) -> ValueRef;
    pub fn LLVMBuildRet(B: BuilderRef, V: ValueRef) -> ValueRef;
    pub fn LLVMBuildAggregateRet(B: BuilderRef, RetVals: *const ValueRef, N: c_uint) -> ValueRef;
    pub fn LLVMBuildBr(B: BuilderRef, Dest: BasicBlockRef) -> ValueRef;
    pub fn LLVMBuildCondBr(B: BuilderRef, If: ValueRef, Then: BasicBlockRef, Else: BasicBlockRef)
        -> ValueRef;
    pub fn LLVMBuildSwitch(B: BuilderRef, V: ValueRef, Else: BasicBlockRef, NumCases: c_uint)
        -> ValueRef;
    pub fn LLVMBuildIndirectBr(B: BuilderRef, Addr: ValueRef, NumDests: c_uint) -> ValueRef;
    pub fn LLVMRustBuildInvoke(
        B: BuilderRef,
        Fn: ValueRef,
        Args: *const ValueRef,
        NumArgs: c_uint,
        Then: BasicBlockRef,
        Catch: BasicBlockRef,
        Bundle: Option<NonNull<OperandBundleDef_opaque>>,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMBuildLandingPad(
        B: BuilderRef,
        Ty: TypeRef,
        PersFn: ValueRef,
        NumClauses: c_uint,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMBuildResume(B: BuilderRef, Exn: ValueRef) -> ValueRef;
    pub fn LLVMBuildUnreachable(B: BuilderRef) -> ValueRef;

    pub fn LLVMRustBuildCleanupPad(
        B: BuilderRef,
        ParentPad: Option<NonNull<Value_opaque>>,
        ArgCnt: c_uint,
        Args: *const ValueRef,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMRustBuildCleanupRet(
        B: BuilderRef,
        CleanupPad: ValueRef,
        UnwindBB: Option<NonNull<BasicBlock_opaque>>,
    ) -> ValueRef;
    pub fn LLVMRustBuildCatchPad(
        B: BuilderRef,
        ParentPad: ValueRef,
        ArgCnt: c_uint,
        Args: *const ValueRef,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMRustBuildCatchRet(B: BuilderRef, Pad: ValueRef, BB: BasicBlockRef) -> ValueRef;
    pub fn LLVMRustBuildCatchSwitch(
        Builder: BuilderRef,
        ParentPad: Option<NonNull<Value_opaque>>,
        BB: Option<NonNull<BasicBlock_opaque>>,
        NumHandlers: c_uint,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMRustAddHandler(CatchSwitch: ValueRef, Handler: BasicBlockRef);
    pub fn LLVMSetPersonalityFn(Func: ValueRef, Pers: ValueRef);

    // Add a case to the switch instruction
    pub fn LLVMAddCase(Switch: ValueRef, OnVal: ValueRef, Dest: BasicBlockRef);

    // Add a clause to the landing pad instruction
    pub fn LLVMAddClause(LandingPad: ValueRef, ClauseVal: ValueRef);

    // Set the cleanup on a landing pad instruction
    pub fn LLVMSetCleanup(LandingPad: ValueRef, Val: Bool);

    // Arithmetic
    pub fn LLVMBuildAdd(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNSWAdd(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNUWAdd(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildFAdd(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildSub(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNSWSub(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNUWSub(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildFSub(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildMul(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNSWMul(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNUWMul(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildFMul(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildUDiv(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildExactUDiv(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildSDiv(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildExactSDiv(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildFDiv(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildURem(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildSRem(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildFRem(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildShl(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildLShr(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildAShr(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildAnd(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildOr(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildXor(B: BuilderRef, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildBinOp(B: BuilderRef, Op: Opcode, LHS: ValueRef, RHS: ValueRef, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMBuildNSWNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMBuildNUWNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMBuildFNeg(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMBuildNot(B: BuilderRef, V: ValueRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMRustSetHasUnsafeAlgebra(Instr: ValueRef);

    // Memory
    pub fn LLVMBuildAlloca(B: BuilderRef, Ty: TypeRef, Name: *const c_char) -> ValueRef;
    pub fn LLVMBuildFree(B: BuilderRef, PointerVal: ValueRef) -> ValueRef;
    pub fn LLVMBuildLoad(B: BuilderRef, PointerVal: ValueRef, Name: *const c_char) -> ValueRef;

    pub fn LLVMBuildStore(B: BuilderRef, Val: ValueRef, Ptr: ValueRef) -> ValueRef;

    pub fn LLVMBuildGEP(
        B: BuilderRef,
        Pointer: ValueRef,
        Indices: *const ValueRef,
        NumIndices: c_uint,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMBuildInBoundsGEP(
        B: BuilderRef,
        Pointer: ValueRef,
        Indices: *const ValueRef,
        NumIndices: c_uint,
        Name: *const c_char,
    ) -> ValueRef;
    pub fn LLVMBuildStructGEP(B: BuilderRef, Pointer: ValueRef, Idx: c_uint, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildGlobalString(B: BuilderRef, Str: *const c_char, Name: *const c_char)
        -> ValueRef;
    pub fn LLVMBuildGlobalStringPtr(B: BuilderRef, Str: *const c_char, Name: *const c_char)
        -> ValueRef;
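
    // Illustrative sketch (added; `builder` and `int32_ty` are assumed to come from the
    // bindings above, and `forty_two` is a constant ValueRef):
    //
    //     let name = std::ffi::CString::new("tmp").unwrap();
    //     let slot = unsafe { LLVMBuildAlloca(builder, int32_ty, name.as_ptr()) };
    //     unsafe { LLVMBuildStore(builder, forty_two, slot) };
    //     let reloaded = unsafe { LLVMBuildLoad(builder, slot, name.as_ptr()) };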
// Casts
|
2016-08-02 20:10:10 +00:00
|
|
|
pub fn LLVMBuildTrunc(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildZExt(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildSExt(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildFPToUI(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildFPToSI(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildUIToFP(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildSIToFP(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildFPTrunc(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildFPExt(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildPtrToInt(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildIntToPtr(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildBitCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildZExtOrBitCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildSExtOrBitCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildTruncOrBitCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
pub fn LLVMBuildCast(B: BuilderRef,
|
|
|
|
Op: Opcode,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
2016-10-22 13:07:35 +00:00
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
2016-08-02 20:10:10 +00:00
|
|
|
pub fn LLVMBuildPointerCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
2017-02-10 17:29:39 +00:00
|
|
|
pub fn LLVMRustBuildIntCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
IsSized: bool)
|
|
|
|
-> ValueRef;
|
2016-08-02 20:10:10 +00:00
|
|
|
pub fn LLVMBuildFPCast(B: BuilderRef,
|
|
|
|
Val: ValueRef,
|
|
|
|
DestTy: TypeRef,
|
|
|
|
Name: *const c_char)
|
|
|
|
-> ValueRef;
|
|
|
|
|
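
// Illustrative only: widening an integer value and reinterpreting a pointer,
// assuming `builder`, `val`, `i64_ty`, `ptr` and `i8p_ty` are live handles
// obtained elsewhere and `name` is a NUL-terminated CString.
//
//     unsafe {
//         let wide = LLVMBuildZExt(builder, val, i64_ty, name.as_ptr());
//         let raw = LLVMBuildBitCast(builder, ptr, i8p_ty, name.as_ptr());
//     }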

// Comparisons
pub fn LLVMBuildICmp(B: BuilderRef,
                     Op: c_uint,
                     LHS: ValueRef,
                     RHS: ValueRef,
                     Name: *const c_char)
                     -> ValueRef;
pub fn LLVMBuildFCmp(B: BuilderRef,
                     Op: c_uint,
                     LHS: ValueRef,
                     RHS: ValueRef,
                     Name: *const c_char)
                     -> ValueRef;

// Miscellaneous instructions
pub fn LLVMBuildPhi(B: BuilderRef, Ty: TypeRef, Name: *const c_char) -> ValueRef;
pub fn LLVMRustBuildCall(B: BuilderRef,
                         Fn: ValueRef,
                         Args: *const ValueRef,
                         NumArgs: c_uint,
                         Bundle: Option<NonNull<OperandBundleDef_opaque>>,
                         Name: *const c_char)
                         -> ValueRef;
pub fn LLVMBuildSelect(B: BuilderRef,
                       If: ValueRef,
                       Then: ValueRef,
                       Else: ValueRef,
                       Name: *const c_char)
                       -> ValueRef;
pub fn LLVMBuildVAArg(B: BuilderRef,
                      list: ValueRef,
                      Ty: TypeRef,
                      Name: *const c_char)
                      -> ValueRef;
pub fn LLVMBuildExtractElement(B: BuilderRef,
                               VecVal: ValueRef,
                               Index: ValueRef,
                               Name: *const c_char)
                               -> ValueRef;
pub fn LLVMBuildInsertElement(B: BuilderRef,
                              VecVal: ValueRef,
                              EltVal: ValueRef,
                              Index: ValueRef,
                              Name: *const c_char)
                              -> ValueRef;
pub fn LLVMBuildShuffleVector(B: BuilderRef,
                              V1: ValueRef,
                              V2: ValueRef,
                              Mask: ValueRef,
                              Name: *const c_char)
                              -> ValueRef;
pub fn LLVMBuildExtractValue(B: BuilderRef,
                             AggVal: ValueRef,
                             Index: c_uint,
                             Name: *const c_char)
                             -> ValueRef;
pub fn LLVMBuildInsertValue(B: BuilderRef,
                            AggVal: ValueRef,
                            EltVal: ValueRef,
                            Index: c_uint,
                            Name: *const c_char)
                            -> ValueRef;
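
// Illustrative only: calling a function value with two arguments and no operand
// bundle. `builder`, `callee`, `a` and `b` are assumed to be live handles, and
// `noname` an empty CString.
//
//     unsafe {
//         let args = [a, b];
//         let ret = LLVMRustBuildCall(builder, callee, args.as_ptr(),
//                                     args.len() as c_uint, None, noname.as_ptr());
//     }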

pub fn LLVMRustBuildVectorReduceFAdd(B: BuilderRef,
                                     Acc: ValueRef,
                                     Src: ValueRef)
                                     -> ValueRef;
pub fn LLVMRustBuildVectorReduceFMul(B: BuilderRef,
                                     Acc: ValueRef,
                                     Src: ValueRef)
                                     -> ValueRef;
pub fn LLVMRustBuildVectorReduceAdd(B: BuilderRef, Src: ValueRef) -> ValueRef;
pub fn LLVMRustBuildVectorReduceMul(B: BuilderRef, Src: ValueRef) -> ValueRef;
pub fn LLVMRustBuildVectorReduceAnd(B: BuilderRef, Src: ValueRef) -> ValueRef;
pub fn LLVMRustBuildVectorReduceOr(B: BuilderRef, Src: ValueRef) -> ValueRef;
pub fn LLVMRustBuildVectorReduceXor(B: BuilderRef, Src: ValueRef) -> ValueRef;
pub fn LLVMRustBuildVectorReduceMin(B: BuilderRef,
                                    Src: ValueRef,
                                    IsSigned: bool)
                                    -> ValueRef;
pub fn LLVMRustBuildVectorReduceMax(B: BuilderRef,
                                    Src: ValueRef,
                                    IsSigned: bool)
                                    -> ValueRef;
pub fn LLVMRustBuildVectorReduceFMin(B: BuilderRef,
                                     Src: ValueRef,
                                     IsNaN: bool)
                                     -> ValueRef;
pub fn LLVMRustBuildVectorReduceFMax(B: BuilderRef,
                                     Src: ValueRef,
                                     IsNaN: bool)
                                     -> ValueRef;
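
// Illustrative only: horizontally summing an integer vector and taking its
// signed minimum, assuming `builder` and `vec` are live handles.
//
//     unsafe {
//         let sum = LLVMRustBuildVectorReduceAdd(builder, vec);
//         let min = LLVMRustBuildVectorReduceMin(builder, vec, /* IsSigned */ true);
//     }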

pub fn LLVMRustBuildMinNum(B: BuilderRef, LHS: ValueRef, RHS: ValueRef) -> ValueRef;
pub fn LLVMRustBuildMaxNum(B: BuilderRef, LHS: ValueRef, RHS: ValueRef) -> ValueRef;

pub fn LLVMBuildIsNull(B: BuilderRef, Val: ValueRef, Name: *const c_char) -> ValueRef;
pub fn LLVMBuildIsNotNull(B: BuilderRef, Val: ValueRef, Name: *const c_char) -> ValueRef;
pub fn LLVMBuildPtrDiff(B: BuilderRef,
                        LHS: ValueRef,
                        RHS: ValueRef,
                        Name: *const c_char)
                        -> ValueRef;

// Atomic Operations
pub fn LLVMRustBuildAtomicLoad(B: BuilderRef,
                               PointerVal: ValueRef,
                               Name: *const c_char,
                               Order: AtomicOrdering)
                               -> ValueRef;
pub fn LLVMRustBuildAtomicStore(B: BuilderRef,
                                Val: ValueRef,
                                Ptr: ValueRef,
                                Order: AtomicOrdering)
                                -> ValueRef;
pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef,
                                  LHS: ValueRef,
                                  CMP: ValueRef,
                                  RHS: ValueRef,
                                  Order: AtomicOrdering,
                                  FailureOrder: AtomicOrdering,
                                  Weak: Bool)
                                  -> ValueRef;
pub fn LLVMBuildAtomicRMW(B: BuilderRef,
                          Op: AtomicRmwBinOp,
                          LHS: ValueRef,
                          RHS: ValueRef,
                          Order: AtomicOrdering,
                          SingleThreaded: Bool)
                          -> ValueRef;
pub fn LLVMRustBuildAtomicFence(B: BuilderRef,
                                Order: AtomicOrdering,
                                Scope: SynchronizationScope);
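
// Illustrative only: an atomic load followed by a weak compare-exchange,
// assuming `builder`, `ptr`, `expected` and `new_val` are live handles, `noname`
// is an empty CString, and `order`/`fail_order` are AtomicOrdering values
// chosen by the caller.
//
//     unsafe {
//         let cur = LLVMRustBuildAtomicLoad(builder, ptr, noname.as_ptr(), order);
//         let pair = LLVMRustBuildAtomicCmpXchg(builder, ptr, expected, new_val,
//                                               order, fail_order, True);
//     }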

// Selected entries from the downcasts.
pub fn LLVMIsATerminatorInst(Inst: ValueRef) -> ValueRef;
pub fn LLVMIsAStoreInst(Inst: ValueRef) -> ValueRef;

/// Writes a module to the specified path. Returns 0 on success.
pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;

/// Creates target data from a target layout string.
pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef;

/// Disposes target data.
pub fn LLVMDisposeTargetData(TD: TargetDataRef);

/// Creates a pass manager.
pub fn LLVMCreatePassManager() -> PassManagerRef;

/// Creates a function-by-function pass manager.
pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> PassManagerRef;

/// Disposes a pass manager.
pub fn LLVMDisposePassManager(PM: PassManagerRef);

/// Runs a pass manager on a module.
pub fn LLVMRunPassManager(PM: PassManagerRef, M: &Module) -> Bool;

pub fn LLVMInitializePasses();

pub fn LLVMPassManagerBuilderCreate() -> PassManagerBuilderRef;
pub fn LLVMPassManagerBuilderDispose(PMB: PassManagerBuilderRef);
pub fn LLVMPassManagerBuilderSetSizeLevel(PMB: PassManagerBuilderRef, Value: Bool);
pub fn LLVMPassManagerBuilderSetDisableUnrollLoops(PMB: PassManagerBuilderRef, Value: Bool);
pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB: PassManagerBuilderRef,
                                                     threshold: c_uint);
pub fn LLVMPassManagerBuilderPopulateModulePassManager(PMB: PassManagerBuilderRef,
                                                       PM: PassManagerRef);
pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB: PassManagerBuilderRef,
                                                         PM: PassManagerRef);
pub fn LLVMPassManagerBuilderPopulateLTOPassManager(PMB: PassManagerBuilderRef,
                                                    PM: PassManagerRef,
                                                    Internalize: Bool,
                                                    RunInliner: Bool);
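
// Illustrative only: the usual builder dance for a module-level pass pipeline,
// assuming `module` is a live `&Module`; the inliner threshold is an arbitrary
// example value.
//
//     unsafe {
//         let pmb = LLVMPassManagerBuilderCreate();
//         let pm = LLVMCreatePassManager();
//         LLVMPassManagerBuilderUseInlinerWithThreshold(pmb, 275);
//         LLVMPassManagerBuilderPopulateModulePassManager(pmb, pm);
//         LLVMRunPassManager(pm, module);
//         LLVMPassManagerBuilderDispose(pmb);
//         LLVMDisposePassManager(pm);
//     }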

pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
    PMB: PassManagerBuilderRef,
    PM: PassManagerRef) -> bool;

// Stuff that's in rustllvm/ because it's not upstream yet.

/// Opens an object file.
pub fn LLVMCreateObjectFile(MemBuf: MemoryBufferRef) -> ObjectFileRef;
/// Closes an object file.
pub fn LLVMDisposeObjectFile(ObjFile: ObjectFileRef);

/// Enumerates the sections in an object file.
pub fn LLVMGetSections(ObjFile: ObjectFileRef) -> SectionIteratorRef;
/// Destroys a section iterator.
pub fn LLVMDisposeSectionIterator(SI: SectionIteratorRef);
/// Returns true if the section iterator is at the end of the section
/// list:
pub fn LLVMIsSectionIteratorAtEnd(ObjFile: ObjectFileRef, SI: SectionIteratorRef) -> Bool;
/// Moves the section iterator to point to the next section.
pub fn LLVMMoveToNextSection(SI: SectionIteratorRef);
/// Returns the current section size.
pub fn LLVMGetSectionSize(SI: SectionIteratorRef) -> c_ulonglong;
/// Returns the current section contents as a string buffer.
pub fn LLVMGetSectionContents(SI: SectionIteratorRef) -> *const c_char;

/// Reads the given file and returns it as a memory buffer. Use
/// LLVMDisposeMemoryBuffer() to get rid of it.
pub fn LLVMRustCreateMemoryBufferWithContentsOfFile(Path: *const c_char) -> MemoryBufferRef;

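
// Illustrative only: walking the sections of an object file, assuming `path` is
// a NUL-terminated CString naming an existing object file.
//
//     unsafe {
//         let buf = LLVMRustCreateMemoryBufferWithContentsOfFile(path.as_ptr());
//         let obj = LLVMCreateObjectFile(buf);
//         let si = LLVMGetSections(obj);
//         while LLVMIsSectionIteratorAtEnd(obj, si) == False {
//             let size = LLVMGetSectionSize(si);
//             LLVMMoveToNextSection(si);
//         }
//         LLVMDisposeSectionIterator(si);
//         LLVMDisposeObjectFile(obj);
//     }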
pub fn LLVMStartMultithreaded() -> Bool;

/// Returns a string describing the last error caused by an LLVMRust* call.
pub fn LLVMRustGetLastError() -> *const c_char;

/// Print the pass timings since static dtors aren't picking them up.
pub fn LLVMRustPrintPassTimings();

pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> TypeRef;

pub fn LLVMStructSetBody(StructTy: TypeRef,
                         ElementTypes: *const TypeRef,
                         ElementCount: c_uint,
                         Packed: Bool);

/// Prepares inline assembly.
pub fn LLVMRustInlineAsm(Ty: TypeRef,
                         AsmString: *const c_char,
                         Constraints: *const c_char,
                         SideEffects: Bool,
                         AlignStack: Bool,
                         Dialect: AsmDialect)
                         -> ValueRef;

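
// Illustrative only: declaring an opaque named struct and filling in its body,
// assuming `ctx` is a live `&Context` and `i32_ty`/`i8p_ty` are TypeRefs.
//
//     unsafe {
//         let name = CString::new("MyPair").unwrap();
//         let pair = LLVMStructCreateNamed(ctx, name.as_ptr());
//         let fields = [i32_ty, i8p_ty];
//         LLVMStructSetBody(pair, fields.as_ptr(), fields.len() as c_uint, False);
//     }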

pub fn LLVMRustDebugMetadataVersion() -> u32;
pub fn LLVMRustVersionMajor() -> u32;
pub fn LLVMRustVersionMinor() -> u32;

pub fn LLVMRustAddModuleFlag(M: &Module, name: *const c_char, value: u32);

pub fn LLVMRustMetadataAsValue(C: &Context, MD: MetadataRef) -> ValueRef;

pub fn LLVMRustDIBuilderCreate(M: &Module) -> DIBuilderRef;
pub fn LLVMRustDIBuilderDispose(Builder: DIBuilderRef);
pub fn LLVMRustDIBuilderFinalize(Builder: DIBuilderRef);

pub fn LLVMRustDIBuilderCreateCompileUnit(Builder: DIBuilderRef,
                                          Lang: c_uint,
                                          File: DIFile,
                                          Producer: *const c_char,
                                          isOptimized: bool,
                                          Flags: *const c_char,
                                          RuntimeVer: c_uint,
                                          SplitName: *const c_char)
                                          -> DIDescriptor;

pub fn LLVMRustDIBuilderCreateFile(Builder: DIBuilderRef,
                                   Filename: *const c_char,
                                   Directory: *const c_char)
                                   -> DIFile;

pub fn LLVMRustDIBuilderCreateSubroutineType(Builder: DIBuilderRef,
                                             File: DIFile,
                                             ParameterTypes: DIArray)
                                             -> DICompositeType;

pub fn LLVMRustDIBuilderCreateFunction(Builder: DIBuilderRef,
                                       Scope: DIDescriptor,
                                       Name: *const c_char,
                                       LinkageName: *const c_char,
                                       File: DIFile,
                                       LineNo: c_uint,
                                       Ty: DIType,
                                       isLocalToUnit: bool,
                                       isDefinition: bool,
                                       ScopeLine: c_uint,
                                       Flags: DIFlags,
                                       isOptimized: bool,
                                       Fn: ValueRef,
                                       TParam: DIArray,
                                       Decl: Option<NonNull<DIDescriptor_opaque>>)
                                       -> DISubprogram;

pub fn LLVMRustDIBuilderCreateBasicType(Builder: DIBuilderRef,
                                        Name: *const c_char,
                                        SizeInBits: u64,
                                        AlignInBits: u32,
                                        Encoding: c_uint)
                                        -> DIBasicType;

pub fn LLVMRustDIBuilderCreatePointerType(Builder: DIBuilderRef,
                                          PointeeTy: DIType,
                                          SizeInBits: u64,
                                          AlignInBits: u32,
                                          Name: *const c_char)
                                          -> DIDerivedType;
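
// Illustrative only: the minimal DIBuilder lifecycle, assuming `module` is a
// live `&Module` and `file`/`dir` are NUL-terminated CStrings.
//
//     unsafe {
//         let dib = LLVMRustDIBuilderCreate(module);
//         let di_file = LLVMRustDIBuilderCreateFile(dib, file.as_ptr(), dir.as_ptr());
//         // ... create the compile unit, types and variables against `di_file` ...
//         LLVMRustDIBuilderFinalize(dib);
//         LLVMRustDIBuilderDispose(dib);
//     }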

pub fn LLVMRustDIBuilderCreateStructType(Builder: DIBuilderRef,
                                         Scope: Option<NonNull<DIDescriptor_opaque>>,
                                         Name: *const c_char,
                                         File: DIFile,
                                         LineNumber: c_uint,
                                         SizeInBits: u64,
                                         AlignInBits: u32,
                                         Flags: DIFlags,
                                         DerivedFrom: Option<NonNull<DIType_opaque>>,
                                         Elements: DIArray,
                                         RunTimeLang: c_uint,
                                         VTableHolder: Option<NonNull<DIType_opaque>>,
                                         UniqueId: *const c_char)
                                         -> DICompositeType;

pub fn LLVMRustDIBuilderCreateMemberType(Builder: DIBuilderRef,
                                         Scope: DIDescriptor,
                                         Name: *const c_char,
                                         File: DIFile,
                                         LineNo: c_uint,
                                         SizeInBits: u64,
                                         AlignInBits: u32,
                                         OffsetInBits: u64,
                                         Flags: DIFlags,
                                         Ty: DIType)
                                         -> DIDerivedType;

pub fn LLVMRustDIBuilderCreateLexicalBlock(Builder: DIBuilderRef,
                                           Scope: DIScope,
                                           File: DIFile,
                                           Line: c_uint,
                                           Col: c_uint)
                                           -> DILexicalBlock;

pub fn LLVMRustDIBuilderCreateLexicalBlockFile(Builder: DIBuilderRef,
                                               Scope: DIScope,
                                               File: DIFile)
                                               -> DILexicalBlock;

pub fn LLVMRustDIBuilderCreateStaticVariable(Builder: DIBuilderRef,
                                             Context: Option<NonNull<DIScope_opaque>>,
                                             Name: *const c_char,
                                             LinkageName: *const c_char,
                                             File: DIFile,
                                             LineNo: c_uint,
                                             Ty: DIType,
                                             isLocalToUnit: bool,
                                             Val: ValueRef,
                                             Decl: Option<NonNull<DIDescriptor_opaque>>,
                                             AlignInBits: u32)
                                             -> DIGlobalVariable;

pub fn LLVMRustDIBuilderCreateVariable(Builder: DIBuilderRef,
                                       Tag: c_uint,
                                       Scope: DIDescriptor,
                                       Name: *const c_char,
                                       File: DIFile,
                                       LineNo: c_uint,
                                       Ty: DIType,
                                       AlwaysPreserve: bool,
                                       Flags: DIFlags,
                                       ArgNo: c_uint,
                                       AlignInBits: u32)
                                       -> DIVariable;

pub fn LLVMRustDIBuilderCreateArrayType(Builder: DIBuilderRef,
                                        Size: u64,
                                        AlignInBits: u32,
                                        Ty: DIType,
                                        Subscripts: DIArray)
                                        -> DIType;

pub fn LLVMRustDIBuilderCreateVectorType(Builder: DIBuilderRef,
                                         Size: u64,
                                         AlignInBits: u32,
                                         Ty: DIType,
                                         Subscripts: DIArray)
                                         -> DIType;

pub fn LLVMRustDIBuilderGetOrCreateSubrange(Builder: DIBuilderRef,
                                            Lo: i64,
                                            Count: i64)
                                            -> DISubrange;

pub fn LLVMRustDIBuilderGetOrCreateArray(Builder: DIBuilderRef,
                                         Ptr: *const Option<NonNull<DIDescriptor_opaque>>,
                                         Count: c_uint)
                                         -> DIArray;

pub fn LLVMRustDIBuilderInsertDeclareAtEnd(Builder: DIBuilderRef,
                                           Val: ValueRef,
                                           VarInfo: DIVariable,
                                           AddrOps: *const i64,
                                           AddrOpsCount: c_uint,
                                           DL: ValueRef,
                                           InsertAtEnd: BasicBlockRef)
                                           -> ValueRef;

pub fn LLVMRustDIBuilderCreateEnumerator(Builder: DIBuilderRef,
                                         Name: *const c_char,
                                         Val: u64)
                                         -> DIEnumerator;

pub fn LLVMRustDIBuilderCreateEnumerationType(Builder: DIBuilderRef,
                                              Scope: DIScope,
                                              Name: *const c_char,
                                              File: DIFile,
                                              LineNumber: c_uint,
                                              SizeInBits: u64,
                                              AlignInBits: u32,
                                              Elements: DIArray,
                                              ClassType: DIType)
                                              -> DIType;

pub fn LLVMRustDIBuilderCreateUnionType(Builder: DIBuilderRef,
                                        Scope: DIScope,
                                        Name: *const c_char,
                                        File: DIFile,
                                        LineNumber: c_uint,
                                        SizeInBits: u64,
                                        AlignInBits: u32,
                                        Flags: DIFlags,
                                        Elements: Option<NonNull<DIArray_opaque>>,
                                        RunTimeLang: c_uint,
                                        UniqueId: *const c_char)
                                        -> DIType;

pub fn LLVMSetUnnamedAddr(GlobalVar: ValueRef, UnnamedAddr: Bool);

pub fn LLVMRustDIBuilderCreateTemplateTypeParameter(Builder: DIBuilderRef,
                                                    Scope: Option<NonNull<DIScope_opaque>>,
                                                    Name: *const c_char,
                                                    Ty: DIType,
                                                    File: DIFile,
                                                    LineNo: c_uint,
                                                    ColumnNo: c_uint)
                                                    -> DITemplateTypeParameter;

pub fn LLVMRustDIBuilderCreateNameSpace(Builder: DIBuilderRef,
                                        Scope: Option<NonNull<DIScope_opaque>>,
                                        Name: *const c_char,
                                        File: DIFile,
                                        LineNo: c_uint)
                                        -> DINameSpace;

pub fn LLVMRustDICompositeTypeSetTypeArray(Builder: DIBuilderRef,
                                           CompositeType: DIType,
                                           TypeArray: DIArray);

pub fn LLVMRustDIBuilderCreateDebugLocation(Context: &Context,
                                            Line: c_uint,
                                            Column: c_uint,
                                            Scope: DIScope,
                                            InlinedAt: Option<NonNull<Metadata_opaque>>)
                                            -> ValueRef;
pub fn LLVMRustDIBuilderCreateOpDeref() -> i64;
pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64;

pub fn LLVMRustWriteTypeToString(Type: TypeRef, s: RustStringRef);
pub fn LLVMRustWriteValueToString(value_ref: ValueRef, s: RustStringRef);

pub fn LLVMIsAConstantInt(value_ref: ValueRef) -> ValueRef;
pub fn LLVMIsAConstantFP(value_ref: ValueRef) -> ValueRef;

pub fn LLVMRustPassKind(Pass: PassRef) -> PassKind;
pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> PassRef;
pub fn LLVMRustAddPass(PM: PassManagerRef, Pass: PassRef);

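
// Illustrative only: looking a pass up by name and scheduling it, assuming `pm`
// is a live PassManagerRef, that "name-anon-globals" is a valid pass name, and
// that a null PassRef signals an unknown pass.
//
//     unsafe {
//         let name = CString::new("name-anon-globals").unwrap();
//         let pass = LLVMRustFindAndCreatePass(name.as_ptr());
//         if !pass.is_null() {
//             LLVMRustAddPass(pm, pass);
//         }
//     }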

pub fn LLVMRustHasFeature(T: TargetMachineRef, s: *const c_char) -> bool;

pub fn LLVMRustPrintTargetCPUs(T: TargetMachineRef);
pub fn LLVMRustPrintTargetFeatures(T: TargetMachineRef);

pub fn LLVMRustCreateTargetMachine(Triple: *const c_char,
                                   CPU: *const c_char,
                                   Features: *const c_char,
                                   Model: CodeModel,
                                   Reloc: RelocMode,
                                   Level: CodeGenOptLevel,
                                   UseSoftFP: bool,
                                   PositionIndependentExecutable: bool,
                                   FunctionSections: bool,
                                   DataSections: bool,
                                   TrapUnreachable: bool,
                                   Singlethread: bool)
                                   -> Option<&'static mut TargetMachine>;
pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
pub fn LLVMRustAddAnalysisPasses(T: TargetMachineRef, PM: PassManagerRef, M: &Module);
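
// Illustrative only: creating and tearing down a target machine. The triple,
// CPU and feature strings plus the `model`/`reloc`/`level` enum values are
// assumed to come from the session; a `None` return means LLVM rejected them.
//
//     unsafe {
//         match LLVMRustCreateTargetMachine(triple.as_ptr(), cpu.as_ptr(),
//                                           features.as_ptr(), model, reloc, level,
//                                           false, false, true, true, false, false) {
//             Some(tm) => LLVMRustDisposeTargetMachine(tm),
//             None => { /* report LLVMRustGetLastError() to the user */ }
//         }
//     }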

pub fn LLVMRustAddBuilderLibraryInfo(PMB: PassManagerBuilderRef,
                                     M: &Module,
                                     DisableSimplifyLibCalls: bool);
pub fn LLVMRustConfigurePassManagerBuilder(PMB: PassManagerBuilderRef,
                                           OptLevel: CodeGenOptLevel,
                                           MergeFunctions: bool,
                                           SLPVectorize: bool,
                                           LoopVectorize: bool,
                                           PrepareForThinLTO: bool,
                                           PGOGenPath: *const c_char,
                                           PGOUsePath: *const c_char);
pub fn LLVMRustAddLibraryInfo(PM: PassManagerRef,
                              M: &Module,
                              DisableSimplifyLibCalls: bool);
pub fn LLVMRustRunFunctionPassManager(PM: PassManagerRef, M: &Module);

pub fn LLVMRustWriteOutputFile(T: TargetMachineRef,
                               PM: PassManagerRef,
                               M: &Module,
                               Output: *const c_char,
                               FileType: FileType)
                               -> LLVMRustResult;
pub fn LLVMRustPrintModule(PM: PassManagerRef,
                           M: &Module,
                           Output: *const c_char,
                           Demangle: extern fn(*const c_char,
                                               size_t,
                                               *mut c_char,
                                               size_t) -> size_t);
pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
pub fn LLVMRustPrintPasses();
pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
pub fn LLVMRustAddAlwaysInlinePass(P: PassManagerBuilderRef, AddLifetimes: bool);
pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
pub fn LLVMRustMarkAllFunctionsNounwind(M: &Module);

pub fn LLVMRustOpenArchive(path: *const c_char) -> ArchiveRef;
pub fn LLVMRustArchiveIteratorNew(AR: ArchiveRef) -> ArchiveIteratorRef;
pub fn LLVMRustArchiveIteratorNext(AIR: ArchiveIteratorRef) -> ArchiveChildRef;
pub fn LLVMRustArchiveChildName(ACR: ArchiveChildRef, size: *mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildData(ACR: ArchiveChildRef, size: *mut size_t) -> *const c_char;
pub fn LLVMRustArchiveChildFree(ACR: ArchiveChildRef);
pub fn LLVMRustArchiveIteratorFree(AIR: ArchiveIteratorRef);
pub fn LLVMRustDestroyArchive(AR: ArchiveRef);

pub fn LLVMRustGetSectionName(SI: SectionIteratorRef, data: *mut *const c_char) -> size_t;

pub fn LLVMRustWriteTwineToString(T: TwineRef, s: RustStringRef);

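
// Illustrative only: iterating the members of an archive, assuming `path` is a
// NUL-terminated CString and that the iterator returns a null child at the end.
//
//     unsafe {
//         let ar = LLVMRustOpenArchive(path.as_ptr());
//         let it = LLVMRustArchiveIteratorNew(ar);
//         loop {
//             let child = LLVMRustArchiveIteratorNext(it);
//             if child.is_null() { break; }
//             let mut len = 0;
//             let name = LLVMRustArchiveChildName(child, &mut len);
//             LLVMRustArchiveChildFree(child);
//         }
//         LLVMRustArchiveIteratorFree(it);
//         LLVMRustDestroyArchive(ar);
//     }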

pub fn LLVMContextSetDiagnosticHandler(C: &Context,
                                       Handler: DiagnosticHandler,
                                       DiagnosticContext: *mut c_void);

pub fn LLVMRustUnpackOptimizationDiagnostic(DI: DiagnosticInfoRef,
                                            pass_name_out: RustStringRef,
                                            function_out: *mut ValueRef,
                                            loc_line_out: *mut c_uint,
                                            loc_column_out: *mut c_uint,
                                            loc_filename_out: RustStringRef,
                                            message_out: RustStringRef);
pub fn LLVMRustUnpackInlineAsmDiagnostic(DI: DiagnosticInfoRef,
                                         cookie_out: *mut c_uint,
                                         message_out: *mut TwineRef,
                                         instruction_out: *mut ValueRef);

pub fn LLVMRustWriteDiagnosticInfoToString(DI: DiagnosticInfoRef, s: RustStringRef);
pub fn LLVMRustGetDiagInfoKind(DI: DiagnosticInfoRef) -> DiagnosticKind;

pub fn LLVMRustSetInlineAsmDiagnosticHandler(C: &Context,
                                             H: InlineAsmDiagHandler,
                                             CX: *mut c_void);
pub fn LLVMRustWriteSMDiagnosticToString(d: SMDiagnosticRef, s: RustStringRef);

pub fn LLVMRustWriteArchive(Dst: *const c_char,
                            NumMembers: size_t,
                            Members: *const RustArchiveMemberRef,
                            WriteSymbtab: bool,
                            Kind: ArchiveKind)
                            -> LLVMRustResult;
pub fn LLVMRustArchiveMemberNew(Filename: *const c_char,
                                Name: *const c_char,
                                Child: Option<NonNull<ArchiveChild_opaque>>)
                                -> RustArchiveMemberRef;
pub fn LLVMRustArchiveMemberFree(Member: RustArchiveMemberRef);

pub fn LLVMRustSetDataLayoutFromTargetMachine(M: &Module, TM: TargetMachineRef);

pub fn LLVMRustBuildOperandBundleDef(Name: *const c_char,
                                     Inputs: *const ValueRef,
                                     NumInputs: c_uint)
                                     -> OperandBundleDefRef;
pub fn LLVMRustFreeOperandBundleDef(Bundle: OperandBundleDefRef);

pub fn LLVMRustPositionBuilderAtStart(B: BuilderRef, BB: BasicBlockRef);

pub fn LLVMRustSetComdat(M: &Module, V: ValueRef, Name: *const c_char);
pub fn LLVMRustUnsetComdat(V: ValueRef);
pub fn LLVMRustSetModulePIELevel(M: &Module);

pub fn LLVMRustModuleBufferCreate(M: &Module) -> *mut ModuleBuffer;
pub fn LLVMRustModuleBufferPtr(p: *const ModuleBuffer) -> *const u8;
pub fn LLVMRustModuleBufferLen(p: *const ModuleBuffer) -> usize;
pub fn LLVMRustModuleBufferFree(p: *mut ModuleBuffer);
pub fn LLVMRustModuleCost(M: &Module) -> u64;

pub fn LLVMRustThinLTOAvailable() -> bool;
pub fn LLVMRustPGOAvailable() -> bool;

pub fn LLVMRustWriteThinBitcodeToFile(PMR: PassManagerRef,
                                      M: &Module,
                                      BC: *const c_char) -> bool;
pub fn LLVMRustThinLTOBufferCreate(M: &Module) -> *mut ThinLTOBuffer;
pub fn LLVMRustThinLTOBufferFree(M: *mut ThinLTOBuffer);
pub fn LLVMRustThinLTOBufferPtr(M: *const ThinLTOBuffer) -> *const c_char;
pub fn LLVMRustThinLTOBufferLen(M: *const ThinLTOBuffer) -> size_t;
pub fn LLVMRustCreateThinLTOData(
    Modules: *const ThinLTOModule,
    NumModules: c_uint,
    PreservedSymbols: *const *const c_char,
    PreservedSymbolsLen: c_uint,
) -> *mut ThinLTOData;
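
// Illustrative only: serializing a module into a ThinLTO buffer and viewing it
// as a byte slice, assuming `module` is a live `&Module`.
//
//     unsafe {
//         let buf = LLVMRustThinLTOBufferCreate(module);
//         let bytes = std::slice::from_raw_parts(
//             LLVMRustThinLTOBufferPtr(buf) as *const u8,
//             LLVMRustThinLTOBufferLen(buf));
//         // ... hand `bytes` to the ThinLTO rename/import steps below ...
//         LLVMRustThinLTOBufferFree(buf);
//     }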

pub fn LLVMRustPrepareThinLTORename(
    Data: *const ThinLTOData,
    Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOResolveWeak(
    Data: *const ThinLTOData,
    Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOInternalize(
    Data: *const ThinLTOData,
    Module: &Module,
) -> bool;
pub fn LLVMRustPrepareThinLTOImport(
    Data: *const ThinLTOData,
    Module: &Module,
) -> bool;
pub fn LLVMRustFreeThinLTOData(Data: *mut ThinLTOData);
pub fn LLVMRustParseBitcodeForThinLTO(
    Context: &Context,
    Data: *const u8,
    len: usize,
    Identifier: *const c_char,
) -> Option<&Module>;

pub fn LLVMGetModuleIdentifier(M: &Module, size: *mut usize) -> *const c_char;
pub fn LLVMRustThinLTOGetDICompileUnit(M: &Module,
                                       CU1: *mut *mut c_void,
                                       CU2: *mut *mut c_void);
pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);

pub fn LLVMRustLinkerNew(M: &Module) -> LinkerRef;
pub fn LLVMRustLinkerAdd(linker: LinkerRef,
                         bytecode: *const c_char,
                         bytecode_len: usize) -> bool;
pub fn LLVMRustLinkerFree(linker: LinkerRef);
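
// Illustrative only: linking serialized bitcode into a module, assuming
// `module` is a live `&Module` and `bytes` is a bitcode byte slice.
//
//     unsafe {
//         let linker = LLVMRustLinkerNew(module);
//         let ok = LLVMRustLinkerAdd(linker,
//                                    bytes.as_ptr() as *const c_char,
//                                    bytes.len());
//         LLVMRustLinkerFree(linker);
//     }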
}