syntax: doc comments all the things
parent 5716abe3f0
commit 4989a56448
src/libsyntax/abi.rs

@@ -60,9 +60,12 @@ pub struct AbiData {
 }
 
 pub enum AbiArchitecture {
-    RustArch,   // Not a real ABI (e.g., intrinsic)
-    AllArch,    // An ABI that specifies cross-platform defaults (e.g., "C")
-    Archs(u32)  // Multiple architectures (bitset)
+    /// Not a real ABI (e.g., intrinsic)
+    RustArch,
+    /// An ABI that specifies cross-platform defaults (e.g., "C")
+    AllArch,
+    /// Multiple architectures (bitset)
+    Archs(u32)
 }
 
 static AbiDatas: &'static [AbiData] = &[
@@ -84,21 +87,13 @@ static AbiDatas: &'static [AbiData] = &[
     AbiData {abi: RustIntrinsic, name: "rust-intrinsic", abi_arch: RustArch},
 ];
 
+/// Iterates through each of the defined ABIs.
 fn each_abi(op: |abi: Abi| -> bool) -> bool {
-    /*!
-     *
-     * Iterates through each of the defined ABIs.
-     */
-
     AbiDatas.iter().advance(|abi_data| op(abi_data.abi))
 }
 
+/// Returns the ABI with the given name (if any).
 pub fn lookup(name: &str) -> Option<Abi> {
-    /*!
-     *
-     * Returns the ABI with the given name (if any).
-     */
-
     let mut res = None;
 
     each_abi(|abi| {
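`lookup` is a linear scan over a static name-to-variant table. A minimal modern-Rust sketch of the same pattern (the `Abi` enum and table entries here are illustrative stand-ins, not the real libsyntax types):

```rust
// Illustrative sketch of the static-table lookup pattern used by `lookup`.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Abi { C, Rust, RustIntrinsic }

static ABI_DATA: &[(&str, Abi)] = &[
    ("C", Abi::C),
    ("Rust", Abi::Rust),
    ("rust-intrinsic", Abi::RustIntrinsic),
];

/// Returns the ABI with the given name (if any), by scanning the table.
fn lookup(name: &str) -> Option<Abi> {
    ABI_DATA.iter().find(|&&(n, _)| n == name).map(|&(_, abi)| abi)
}

fn main() {
    assert_eq!(lookup("C"), Some(Abi::C));
    assert_eq!(lookup("stdcall"), None);
}
```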
src/libsyntax/ast.rs

@@ -24,7 +24,8 @@ use std::rc::Rc;
 use std::gc::{Gc, GC};
 use serialize::{Encodable, Decodable, Encoder, Decoder};
 
-/// A pointer abstraction. FIXME(eddyb) #10676 use Rc<T> in the future.
+/// A pointer abstraction.
+// FIXME(eddyb) #10676 use Rc<T> in the future.
 pub type P<T> = Gc<T>;
 
 #[allow(non_snake_case_functions)]
@@ -36,10 +37,10 @@ pub fn P<T: 'static>(value: T) -> P<T> {
 // FIXME #6993: in librustc, uses of "ident" should be replaced
 // by just "Name".
 
-// an identifier contains a Name (index into the interner
-// table) and a SyntaxContext to track renaming and
-// macro expansion per Flatt et al., "Macros
-// That Work Together"
+/// An identifier contains a Name (index into the interner
+/// table) and a SyntaxContext to track renaming and
+/// macro expansion per Flatt et al., "Macros
+/// That Work Together"
 #[deriving(Clone, Hash, PartialOrd, Eq, Ord, Show)]
 pub struct Ident {
     pub name: Name,
@@ -122,10 +123,9 @@ pub struct Lifetime {
     pub name: Name
 }
 
-// a "Path" is essentially Rust's notion of a name;
-// for instance: std::cmp::PartialEq . It's represented
-// as a sequence of identifiers, along with a bunch
-// of supporting information.
+/// A "Path" is essentially Rust's notion of a name; for instance:
+/// std::cmp::PartialEq . It's represented as a sequence of identifiers,
+/// along with a bunch of supporting information.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct Path {
     pub span: Span,
@@ -163,15 +163,15 @@ pub struct DefId {
 pub static LOCAL_CRATE: CrateNum = 0;
 pub static CRATE_NODE_ID: NodeId = 0;
 
-// When parsing and doing expansions, we initially give all AST nodes this AST
-// node value. Then later, in the renumber pass, we renumber them to have
-// small, positive ids.
+/// When parsing and doing expansions, we initially give all AST nodes this AST
+/// node value. Then later, in the renumber pass, we renumber them to have
+/// small, positive ids.
 pub static DUMMY_NODE_ID: NodeId = -1;
 
-// The AST represents all type param bounds as types.
-// typeck::collect::compute_bounds matches these against
-// the "special" built-in traits (see middle::lang_items) and
-// detects Copy, Send and Share.
+/// The AST represents all type param bounds as types.
+/// typeck::collect::compute_bounds matches these against
+/// the "special" built-in traits (see middle::lang_items) and
+/// detects Copy, Send and Share.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum TyParamBound {
     TraitTyParamBound(TraitRef),
@@ -210,9 +210,9 @@ impl Generics {
     }
 }
 
-// The set of MetaItems that define the compilation environment of the crate,
-// used to drive conditional compilation
-pub type CrateConfig = Vec<Gc<MetaItem>>;
+/// The set of MetaItems that define the compilation environment of the crate,
+/// used to drive conditional compilation
+pub type CrateConfig = Vec<Gc<MetaItem>> ;
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct Crate {
@@ -289,13 +289,13 @@ pub enum BindingMode {
 pub enum Pat_ {
     PatWild,
     PatWildMulti,
-    // A PatIdent may either be a new bound variable,
-    // or a nullary enum (in which case the third field
-    // is None).
-    // In the nullary enum case, the parser can't determine
-    // which it is. The resolver determines this, and
-    // records this pattern's NodeId in an auxiliary
-    // set (of "PatIdents that refer to nullary enums")
+    /// A PatIdent may either be a new bound variable,
+    /// or a nullary enum (in which case the third field
+    /// is None).
+    /// In the nullary enum case, the parser can't determine
+    /// which it is. The resolver determines this, and
+    /// records this pattern's NodeId in an auxiliary
+    /// set (of "PatIdents that refer to nullary enums")
     PatIdent(BindingMode, SpannedIdent, Option<Gc<Pat>>),
     PatEnum(Path, Option<Vec<Gc<Pat>>>), /* "none" means a * pattern where
                                             * we don't bind the fields to names */
@@ -305,8 +305,8 @@ pub enum Pat_ {
     PatRegion(Gc<Pat>), // reference pattern
     PatLit(Gc<Expr>),
     PatRange(Gc<Expr>, Gc<Expr>),
-    // [a, b, ..i, y, z] is represented as
-    // PatVec(~[a, b], Some(i), ~[y, z])
+    /// [a, b, ..i, y, z] is represented as:
+    /// PatVec(~[a, b], Some(i), ~[y, z])
     PatVec(Vec<Gc<Pat>>, Option<Gc<Pat>>, Vec<Gc<Pat>>),
     PatMac(Mac),
 }
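The `PatVec` docs describe the old slice-pattern syntax, where `..i` bound the middle of the slice. A minimal sketch of the equivalent in today's Rust, where the middle is bound with `rest @ ..` (modern syntax, not what this 2014-era commit documents):

```rust
fn main() {
    let v = [1, 2, 3, 4, 5, 6];
    // Old syntax: [a, b, ..i, y, z]; modern syntax binds the middle with `@ ..`.
    let [a, b, rest @ .., y, z] = v;
    assert_eq!((a, b, y, z), (1, 2, 5, 6));
    assert_eq!(rest, [3, 4]); // the `Some(i)` middle of the PatVec
}
```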
@@ -319,9 +319,12 @@ pub enum Mutability {
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ExprVstore {
-    ExprVstoreUniq,     // ~[1,2,3,4]
-    ExprVstoreSlice,    // &[1,2,3,4]
-    ExprVstoreMutSlice, // &mut [1,2,3,4]
+    /// ~[1, 2, 3, 4]
+    ExprVstoreUniq,
+    /// &[1, 2, 3, 4]
+    ExprVstoreSlice,
+    /// &mut [1, 2, 3, 4]
+    ExprVstoreMutSlice,
 }
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
@@ -359,16 +362,16 @@ pub type Stmt = Spanned<Stmt_>;
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Stmt_ {
-    // could be an item or a local (let) binding:
+    /// Could be an item or a local (let) binding:
     StmtDecl(Gc<Decl>, NodeId),
 
-    // expr without trailing semi-colon (must have unit type):
+    /// Expr without trailing semi-colon (must have unit type):
     StmtExpr(Gc<Expr>, NodeId),
 
-    // expr with trailing semi-colon (may have any type):
+    /// Expr with trailing semi-colon (may have any type):
     StmtSemi(Gc<Expr>, NodeId),
 
-    // bool: is there a trailing semi-colon?
+    /// bool: is there a trailing semi-colon?
     StmtMac(Mac, bool),
 }
 
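The `StmtExpr`/`StmtSemi` split reflects the usual Rust rule: a block's trailing expression without a semicolon is the block's value, while a semicolon discards the value. A quick sketch in modern Rust:

```rust
fn main() {
    // Trailing expression with no semicolon (StmtExpr-like): its value
    // becomes the value of the enclosing block, so it must typecheck there.
    let x = {
        let a = 2;
        a * 21 // no semicolon: this i32 is the block's value
    };
    assert_eq!(x, 42);

    // With a trailing semicolon (StmtSemi-like) the value is discarded
    // and the block evaluates to (), whatever the expression's type.
    let y: () = {
        x + 1;
    };
    assert_eq!(y, ());
}
```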
@@ -397,9 +400,9 @@ pub type Decl = Spanned<Decl_>;
 
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Decl_ {
-    // a local (let) binding:
+    /// A local (let) binding:
     DeclLocal(Gc<Local>),
-    // an item binding:
+    /// An item binding:
     DeclItem(Gc<Item>),
 }
 
@@ -443,7 +446,7 @@ pub struct Expr {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Expr_ {
     ExprVstore(Gc<Expr>, ExprVstore),
-    // First expr is the place; second expr is the value.
+    /// First expr is the place; second expr is the value.
     ExprBox(Gc<Expr>, Gc<Expr>),
     ExprVec(Vec<Gc<Expr>>),
     ExprCall(Gc<Expr>, Vec<Gc<Expr>>),
@@ -483,124 +486,121 @@ pub enum Expr_ {
 
     ExprMac(Mac),
 
-    // A struct literal expression.
+    /// A struct literal expression.
     ExprStruct(Path, Vec<Field> , Option<Gc<Expr>> /* base */),
 
-    // A vector literal constructed from one repeated element.
+    /// A vector literal constructed from one repeated element.
     ExprRepeat(Gc<Expr> /* element */, Gc<Expr> /* count */),
 
-    // No-op: used solely so we can pretty-print faithfully
+    /// No-op: used solely so we can pretty-print faithfully
     ExprParen(Gc<Expr>)
 }
 
-// When the main rust parser encounters a syntax-extension invocation, it
-// parses the arguments to the invocation as a token-tree. This is a very
-// loose structure, such that all sorts of different AST-fragments can
-// be passed to syntax extensions using a uniform type.
-//
-// If the syntax extension is an MBE macro, it will attempt to match its
-// LHS "matchers" against the provided token tree, and if it finds a
-// match, will transcribe the RHS token tree, splicing in any captured
-// macro_parser::matched_nonterminals into the TTNonterminals it finds.
-//
-// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
-// makes any real sense. You could write them elsewhere but nothing
-// else knows what to do with them, so you'll probably get a syntax
-// error.
-//
+/// When the main rust parser encounters a syntax-extension invocation, it
+/// parses the arguments to the invocation as a token-tree. This is a very
+/// loose structure, such that all sorts of different AST-fragments can
+/// be passed to syntax extensions using a uniform type.
+///
+/// If the syntax extension is an MBE macro, it will attempt to match its
+/// LHS "matchers" against the provided token tree, and if it finds a
+/// match, will transcribe the RHS token tree, splicing in any captured
+/// macro_parser::matched_nonterminals into the TTNonterminals it finds.
+///
+/// The RHS of an MBE macro is the only place a TTNonterminal or TTSeq
+/// makes any real sense. You could write them elsewhere but nothing
+/// else knows what to do with them, so you'll probably get a syntax
+/// error.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 #[doc="For macro invocations; parsing is delegated to the macro"]
 pub enum TokenTree {
-    // a single token
+    /// A single token
     TTTok(Span, ::parse::token::Token),
-    // a delimited sequence (the delimiters appear as the first
-    // and last elements of the vector)
+    /// A delimited sequence (the delimiters appear as the first
+    /// and last elements of the vector)
     // FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
     TTDelim(Rc<Vec<TokenTree>>),
 
     // These only make sense for right-hand-sides of MBE macros:
 
-    // a kleene-style repetition sequence with a span, a TTForest,
-    // an optional separator, and a boolean where true indicates
-    // zero or more (..), and false indicates one or more (+).
+    /// A kleene-style repetition sequence with a span, a TTForest,
+    /// an optional separator, and a boolean where true indicates
+    /// zero or more (..), and false indicates one or more (+).
    // FIXME(eddyb) #6308 Use Rc<[TokenTree]> after DST.
     TTSeq(Span, Rc<Vec<TokenTree>>, Option<::parse::token::Token>, bool),
 
-    // a syntactic variable that will be filled in by macro expansion.
+    /// A syntactic variable that will be filled in by macro expansion.
     TTNonterminal(Span, Ident)
 }
 
-//
-// Matchers are nodes defined-by and recognized-by the main rust parser and
-// language, but they're only ever found inside syntax-extension invocations;
-// indeed, the only thing that ever _activates_ the rules in the rust parser
-// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
-// itself. Matchers represent a small sub-language for pattern-matching
-// token-trees, and are thus primarily used by the macro-defining extension
-// itself.
-//
-// MatchTok
-// --------
-//
-// A matcher that matches a single token, denoted by the token itself. So
-// long as there's no $ involved.
-//
-//
-// MatchSeq
-// --------
-//
-// A matcher that matches a sequence of sub-matchers, denoted various
-// possible ways:
-//
-// $(M)* zero or more Ms
-// $(M)+ one or more Ms
-// $(M),+ one or more comma-separated Ms
-// $(A B C);* zero or more semi-separated 'A B C' seqs
-//
-//
-// MatchNonterminal
-// -----------------
-//
-// A matcher that matches one of a few interesting named rust
-// nonterminals, such as types, expressions, items, or raw token-trees. A
-// black-box matcher on expr, for example, binds an expr to a given ident,
-// and that ident can re-occur as an interpolation in the RHS of a
-// macro-by-example rule. For example:
-//
-// $foo:expr => 1 + $foo // interpolate an expr
-// $foo:tt => $foo // interpolate a token-tree
-// $foo:tt => bar! $foo // only other valid interpolation
-//                      // is in arg position for another
-//                      // macro
-//
-// As a final, horrifying aside, note that macro-by-example's input is
-// also matched by one of these matchers. Holy self-referential! It is matched
-// by a MatchSeq, specifically this one:
-//
-// $( $lhs:matchers => $rhs:tt );+
-//
-// If you understand that, you have closed to loop and understand the whole
-// macro system. Congratulations.
-//
+/// Matchers are nodes defined-by and recognized-by the main rust parser and
+/// language, but they're only ever found inside syntax-extension invocations;
+/// indeed, the only thing that ever _activates_ the rules in the rust parser
+/// for parsing a matcher is a matcher looking for the 'matchers' nonterminal
+/// itself. Matchers represent a small sub-language for pattern-matching
+/// token-trees, and are thus primarily used by the macro-defining extension
+/// itself.
+///
+/// MatchTok
+/// --------
+///
+/// A matcher that matches a single token, denoted by the token itself. So
+/// long as there's no $ involved.
+///
+///
+/// MatchSeq
+/// --------
+///
+/// A matcher that matches a sequence of sub-matchers, denoted various
+/// possible ways:
+///
+/// $(M)* zero or more Ms
+/// $(M)+ one or more Ms
+/// $(M),+ one or more comma-separated Ms
+/// $(A B C);* zero or more semi-separated 'A B C' seqs
+///
+///
+/// MatchNonterminal
+/// -----------------
+///
+/// A matcher that matches one of a few interesting named rust
+/// nonterminals, such as types, expressions, items, or raw token-trees. A
+/// black-box matcher on expr, for example, binds an expr to a given ident,
+/// and that ident can re-occur as an interpolation in the RHS of a
+/// macro-by-example rule. For example:
+///
+/// $foo:expr => 1 + $foo // interpolate an expr
+/// $foo:tt => $foo // interpolate a token-tree
+/// $foo:tt => bar! $foo // only other valid interpolation
+///                      // is in arg position for another
+///                      // macro
+///
+/// As a final, horrifying aside, note that macro-by-example's input is
+/// also matched by one of these matchers. Holy self-referential! It is matched
+/// by a MatchSeq, specifically this one:
+///
+/// $( $lhs:matchers => $rhs:tt );+
+///
+/// If you understand that, you have closed the loop and understand the whole
+/// macro system. Congratulations.
 pub type Matcher = Spanned<Matcher_>;
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Matcher_ {
-    // match one token
+    /// Match one token
     MatchTok(::parse::token::Token),
-    // match repetitions of a sequence: body, separator, zero ok?,
-    // lo, hi position-in-match-array used:
+    /// Match repetitions of a sequence: body, separator, zero ok?,
+    /// lo, hi position-in-match-array used:
     MatchSeq(Vec<Matcher> , Option<::parse::token::Token>, bool, uint, uint),
-    // parse a Rust NT: name to bind, name of NT, position in match array:
+    /// Parse a Rust NT: name to bind, name of NT, position in match array:
     MatchNonterminal(Ident, Ident, uint)
 }
 
 pub type Mac = Spanned<Mac_>;
 
-// represents a macro invocation. The Path indicates which macro
-// is being invoked, and the vector of token-trees contains the source
-// of the macro invocation.
-// There's only one flavor, now, so this could presumably be simplified.
+/// Represents a macro invocation. The Path indicates which macro
+/// is being invoked, and the vector of token-trees contains the source
+/// of the macro invocation.
+/// There's only one flavor, now, so this could presumably be simplified.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum Mac_ {
     MacInvocTT(Path, Vec<TokenTree> , SyntaxContext), // new macro-invocation
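The matcher forms this doc block describes (`$(M)*`, `$(M),+`, `$name:expr`, …) survive essentially unchanged in today's `macro_rules!`. A small self-contained sketch exercising a comma separator, a Kleene repetition, and an `expr` nonterminal spliced into the RHS:

```rust
// A comma-separated repetition $($x:expr),+ on the LHS; each captured
// expression is transcribed back into the RHS via $(+ $x)+.
macro_rules! sum {
    () => { 0 };
    ($($x:expr),+ $(,)?) => { 0 $(+ $x)+ };
}

fn main() {
    assert_eq!(sum!(), 0);
    assert_eq!(sum!(1, 2, 3), 6);
    assert_eq!(sum!(2 * 21), 42); // the whole expression binds to $x
}
```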
@@ -659,11 +659,10 @@ pub struct TypeMethod {
     pub vis: Visibility,
 }
 
-/// Represents a method declaration in a trait declaration, possibly
-/// including a default implementation
-// A trait method is either required (meaning it doesn't have an
-// implementation, just a signature) or provided (meaning it has a default
-// implementation).
+/// Represents a method declaration in a trait declaration, possibly including
+/// a default implementation. A trait method is either required (meaning it
+/// doesn't have an implementation, just a signature) or provided (meaning it
+/// has a default implementation).
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum TraitMethod {
     Required(TypeMethod),
@@ -720,7 +719,7 @@ pub struct Ty {
     pub span: Span,
 }
 
-// Not represented directly in the AST, referred to by name through a ty_path.
+/// Not represented directly in the AST, referred to by name through a ty_path.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum PrimTy {
     TyInt(IntTy),
@@ -753,10 +752,10 @@ pub struct ClosureTy {
     pub fn_style: FnStyle,
     pub onceness: Onceness,
     pub decl: P<FnDecl>,
-    // Optional optvec distinguishes between "fn()" and "fn:()" so we can
-    // implement issue #7264. None means "fn()", which means infer a default
-    // bound based on pointer sigil during typeck. Some(Empty) means "fn:()",
-    // which means use no bounds (e.g., not even Owned on a ~fn()).
+    /// Optional optvec distinguishes between "fn()" and "fn:()" so we can
+    /// implement issue #7264. None means "fn()", which means infer a default
+    /// bound based on pointer sigil during typeck. Some(Empty) means "fn:()",
+    /// which means use no bounds (e.g., not even Owned on a ~fn()).
     pub bounds: Option<OwnedSlice<TyParamBound>>,
 }
 
@@ -789,11 +788,11 @@ pub enum Ty_ {
     TyUnboxedFn(Gc<UnboxedFnTy>),
     TyTup(Vec<P<Ty>> ),
     TyPath(Path, Option<OwnedSlice<TyParamBound>>, NodeId), // for #7264; see above
-    // No-op; kept solely so that we can pretty-print faithfully
+    /// No-op; kept solely so that we can pretty-print faithfully
     TyParen(P<Ty>),
     TyTypeof(Gc<Expr>),
-    // TyInfer means the type should be inferred instead of it having been
-    // specified. This can appear anywhere in a type.
+    /// TyInfer means the type should be inferred instead of it having been
+    /// specified. This can appear anywhere in a type.
     TyInfer,
 }
 
@@ -854,8 +853,10 @@ pub struct FnDecl {
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum FnStyle {
-    UnsafeFn, // declared with "unsafe fn"
-    NormalFn, // declared with "fn"
+    /// Declared with "unsafe fn"
+    UnsafeFn,
+    /// Declared with "fn"
+    NormalFn,
 }
 
 impl fmt::Show for FnStyle {
@@ -869,18 +870,24 @@ impl fmt::Show for FnStyle {
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum RetStyle {
-    NoReturn, // functions with return type _|_ that always
-              // raise an error or exit (i.e. never return to the caller)
-    Return, // everything else
+    /// Functions with return type ! that always
+    /// raise an error or exit (i.e. never return to the caller)
+    NoReturn,
+    /// Everything else
+    Return,
 }
 
 /// Represents the kind of 'self' associated with a method
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ExplicitSelf_ {
-    SelfStatic, // no self
-    SelfValue(Ident), // `self`
-    SelfRegion(Option<Lifetime>, Mutability, Ident), // `&'lt self`, `&'lt mut self`
-    SelfUniq(Ident), // `~self`
+    /// No self
+    SelfStatic,
+    /// `self`
+    SelfValue(Ident),
+    /// `&'lt self`, `&'lt mut self`
+    SelfRegion(Option<Lifetime>, Mutability, Ident),
+    /// `~self`
+    SelfUniq(Ident)
 }
 
 pub type ExplicitSelf = Spanned<ExplicitSelf_>;
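These `ExplicitSelf_` kinds map onto the receiver forms still written today; `~self` has no modern spelling, and `self: Box<Self>` is the closest analogue. A brief sketch:

```rust
struct Counter { n: u32 }

impl Counter {
    fn new() -> Counter { Counter { n: 0 } }        // SelfStatic: no self
    fn get(&self) -> u32 { self.n }                 // SelfRegion: &self
    fn bump(&mut self) { self.n += 1; }             // SelfRegion: &mut self
    fn into_inner(self) -> u32 { self.n }           // SelfValue: by-value self
    fn boxed_get(self: Box<Self>) -> u32 { self.n } // closest analogue of SelfUniq (~self)
}

fn main() {
    let mut c = Counter::new();
    c.bump();
    assert_eq!(c.get(), 1);
    assert_eq!(c.into_inner(), 1);
    assert_eq!(Box::new(Counter::new()).boxed_get(), 0);
}
```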
@@ -959,17 +966,17 @@ pub type ViewPath = Spanned<ViewPath_>;
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ViewPath_ {
 
-    // quux = foo::bar::baz
-    //
-    // or just
-    //
-    // foo::bar::baz (with 'baz =' implicitly on the left)
+    /// `quux = foo::bar::baz`
+    ///
+    /// or just
+    ///
+    /// `foo::bar::baz` (with 'baz =' implicitly on the left)
     ViewPathSimple(Ident, Path, NodeId),
 
-    // foo::bar::*
+    /// `foo::bar::*`
     ViewPathGlob(Path, NodeId),
 
-    // foo::bar::{a,b,c}
+    /// `foo::bar::{a,b,c}`
     ViewPathList(Path, Vec<PathListIdent> , NodeId)
 }
 
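The three view-path forms correspond to `use` declarations still in the language; the renaming form was written `use quux = foo::bar::baz;` in this era and is written with `as` in modern Rust. A sketch:

```rust
// The three ViewPath_ forms, in modern `use` syntax.
mod foo {
    pub mod bar {
        pub fn a() {}
        pub fn b() {}
        pub fn baz() {}
    }
}

use foo::bar::baz as quux; // ViewPathSimple with an explicit left-hand name
use foo::bar::*;           // ViewPathGlob
use foo::bar::{a, b};      // ViewPathList

fn main() {
    quux();
    baz(); // resolved through the glob import
    a();
    b();
}
```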
@@ -983,20 +990,20 @@ pub struct ViewItem {
 
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum ViewItem_ {
-    // ident: name used to refer to this crate in the code
-    // optional (InternedString,StrStyle): if present, this is a location
-    // (containing arbitrary characters) from which to fetch the crate sources
-    // For example, extern crate whatever = "github.com/rust-lang/rust"
+    /// Ident: name used to refer to this crate in the code
+    /// optional (InternedString,StrStyle): if present, this is a location
+    /// (containing arbitrary characters) from which to fetch the crate sources
+    /// For example, extern crate whatever = "github.com/rust-lang/rust"
     ViewItemExternCrate(Ident, Option<(InternedString,StrStyle)>, NodeId),
     ViewItemUse(Gc<ViewPath>),
 }
 
-// Meta-data associated with an item
+/// Meta-data associated with an item
 pub type Attribute = Spanned<Attribute_>;
 
-// Distinguishes between Attributes that decorate items and Attributes that
-// are contained as statements within items. These two cases need to be
-// distinguished for pretty-printing.
+/// Distinguishes between Attributes that decorate items and Attributes that
+/// are contained as statements within items. These two cases need to be
+/// distinguished for pretty-printing.
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum AttrStyle {
     AttrOuter,
@@ -1006,7 +1013,7 @@ pub enum AttrStyle {
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct AttrId(pub uint);
 
-// doc-comments are promoted to attributes that have is_sugared_doc = true
+/// Doc-comments are promoted to attributes that have is_sugared_doc = true
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct Attribute_ {
     pub id: AttrId,
@@ -1015,13 +1022,12 @@ pub struct Attribute_ {
     pub is_sugared_doc: bool,
 }
 
-/*
-  TraitRef's appear in impls.
-  resolve maps each TraitRef's ref_id to its defining trait; that's all
-  that the ref_id is for. The impl_id maps to the "self type" of this impl.
-  If this impl is an ItemImpl, the impl_id is redundant (it could be the
-  same as the impl's node id).
- */
+/// TraitRef's appear in impls.
+/// resolve maps each TraitRef's ref_id to its defining trait; that's all
+/// that the ref_id is for. The impl_id maps to the "self type" of this impl.
+/// If this impl is an ItemImpl, the impl_id is redundant (it could be the
+/// same as the impl's node id).
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct TraitRef {
     pub path: Path,
@@ -1065,7 +1071,8 @@ pub type StructField = Spanned<StructField_>;
 #[deriving(Clone, PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum StructFieldKind {
     NamedField(Ident, Visibility),
-    UnnamedField(Visibility), // element of a tuple-like struct
+    /// Element of a tuple-like struct
+    UnnamedField(Visibility),
 }
 
 impl StructFieldKind {
@@ -1079,12 +1086,15 @@ impl StructFieldKind {
 
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub struct StructDef {
-    pub fields: Vec<StructField>, /* fields, not including ctor */
-    /* ID of the constructor. This is only used for tuple- or enum-like
-     * structs. */
+    /// Fields, not including ctor
+    pub fields: Vec<StructField>,
+    /// ID of the constructor. This is only used for tuple- or enum-like
+    /// structs.
     pub ctor_id: Option<NodeId>,
-    pub super_struct: Option<P<Ty>>, // Super struct, if specified.
-    pub is_virtual: bool, // True iff the struct may be inherited from.
+    /// Super struct, if specified.
+    pub super_struct: Option<P<Ty>>,
+    /// True iff the struct may be inherited from.
+    pub is_virtual: bool,
 }
 
 /*
@@ -1120,7 +1130,7 @@ pub enum Item_ {
             Option<TraitRef>, // (optional) trait this impl implements
             P<Ty>, // self
             Vec<Gc<Method>>),
-    // a macro invocation (which includes macro definition)
+    /// A macro invocation (which includes macro definition)
     ItemMac(Mac),
 }
 
@@ -1140,9 +1150,9 @@ pub enum ForeignItem_ {
     ForeignItemStatic(P<Ty>, /* is_mutbl */ bool),
 }
 
-// The data we save and restore about an inlined item or method. This is not
-// part of the AST that we parse from a file, but it becomes part of the tree
-// that we trans.
+/// The data we save and restore about an inlined item or method. This is not
+/// part of the AST that we parse from a file, but it becomes part of the tree
+/// that we trans.
 #[deriving(PartialEq, Eq, Encodable, Decodable, Hash)]
 pub enum InlinedItem {
     IIItem(Gc<Item>),
src/libsyntax/ast_map.rs

@@ -112,13 +112,13 @@ pub enum Node {
     NodeLifetime(Gc<Lifetime>),
 }
 
-// The odd layout is to bring down the total size.
+/// The odd layout is to bring down the total size.
 #[deriving(Clone)]
 enum MapEntry {
-    // Placeholder for holes in the map.
+    /// Placeholder for holes in the map.
     NotPresent,
 
-    // All the node types, with a parent ID.
+    /// All the node types, with a parent ID.
     EntryItem(NodeId, Gc<Item>),
     EntryForeignItem(NodeId, Gc<ForeignItem>),
     EntryTraitMethod(NodeId, Gc<TraitMethod>),
@@ -133,14 +133,14 @@ enum MapEntry {
     EntryStructCtor(NodeId, Gc<StructDef>),
     EntryLifetime(NodeId, Gc<Lifetime>),
 
-    // Roots for node trees.
+    /// Roots for node trees.
     RootCrate,
     RootInlinedParent(P<InlinedParent>)
 }
 
 struct InlinedParent {
     path: Vec<PathElem> ,
-    // Required by NodeTraitMethod and NodeMethod.
+    /// Required by NodeTraitMethod and NodeMethod.
     def_id: DefId
 }
 
@@ -243,7 +243,7 @@ impl Map {
                 ItemForeignMod(ref nm) => Some(nm.abi),
                 _ => None
             },
-            // Wrong but OK, because the only inlined foreign items are intrinsics.
+            /// Wrong but OK, because the only inlined foreign items are intrinsics.
             Some(RootInlinedParent(_)) => Some(abi::RustIntrinsic),
             _ => None
         };
@@ -432,8 +432,8 @@ pub trait FoldOps {
 
 pub struct Ctx<'a, F> {
     map: &'a Map,
-    // The node in which we are currently mapping (an item or a method).
-    // When equal to DUMMY_NODE_ID, the next mapped node becomes the parent.
+    /// The node in which we are currently mapping (an item or a method).
+    /// When equal to DUMMY_NODE_ID, the next mapped node becomes the parent.
     parent: NodeId,
     fold_ops: F
 }
@@ -618,9 +618,9 @@ pub fn map_crate<F: FoldOps>(krate: Crate, fold_ops: F) -> (Crate, Map) {
     (krate, map)
 }
 
-// Used for items loaded from external crate that are being inlined into this
-// crate. The `path` should be the path to the item but should not include
-// the item itself.
+/// Used for items loaded from external crate that are being inlined into this
+/// crate. The `path` should be the path to the item but should not include
+/// the item itself.
 pub fn map_decoded_item<F: FoldOps>(map: &Map,
                                     path: Vec<PathElem> ,
                                     fold_ops: F,
src/libsyntax/ast_util.rs

@@ -101,8 +101,8 @@ pub fn is_path(e: Gc<Expr>) -> bool {
     return match e.node { ExprPath(_) => true, _ => false };
 }
 
-// Get a string representation of a signed int type, with its value.
-// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
+/// Get a string representation of a signed int type, with its value.
+/// We want to avoid "45int" and "-3int" in favor of "45" and "-3"
 pub fn int_ty_to_string(t: IntTy, val: Option<i64>) -> String {
     let s = match t {
         TyI if val.is_some() => "i",
@@ -131,8 +131,8 @@ pub fn int_ty_max(t: IntTy) -> u64 {
     }
 }
 
-// Get a string representation of an unsigned int type, with its value.
-// We want to avoid "42uint" in favor of "42u"
+/// Get a string representation of an unsigned int type, with its value.
+/// We want to avoid "42uint" in favor of "42u"
 pub fn uint_ty_to_string(t: UintTy, val: Option<u64>) -> String {
     let s = match t {
         TyU if val.is_some() => "u",
@@ -249,8 +249,8 @@ pub fn public_methods(ms: Vec<Gc<Method>> ) -> Vec<Gc<Method>> {
     }).collect()
 }
 
-// extract a TypeMethod from a TraitMethod. if the TraitMethod is
-// a default, pull out the useful fields to make a TypeMethod
+/// Extract a TypeMethod from a TraitMethod. If the TraitMethod is
+/// a default, pull out the useful fields to make a TypeMethod
 pub fn trait_method_to_ty_method(method: &TraitMethod) -> TypeMethod {
     match *method {
         Required(ref m) => (*m).clone(),
@@ -705,7 +705,7 @@ pub fn segments_name_eq(a : &[ast::PathSegment], b : &[ast::PathSegment]) -> bool {
     }
 }
 
-// Returns true if this literal is a string and false otherwise.
+/// Returns true if this literal is a string and false otherwise.
 pub fn lit_is_str(lit: Gc<Lit>) -> bool {
     match lit.node {
         LitStr(..) => true,
src/libsyntax/attr.rs

@@ -46,10 +46,8 @@ pub trait AttrMetaMethods {
     /// #[foo="bar"] and #[foo(bar)]
     fn name(&self) -> InternedString;
 
-    /**
-     * Gets the string value if self is a MetaNameValue variant
-     * containing a string, otherwise None.
-     */
+    /// Gets the string value if self is a MetaNameValue variant
+    /// containing a string, otherwise None.
     fn value_str(&self) -> Option<InternedString>;
     /// Gets a list of inner meta items from a list MetaItem type.
     fn meta_item_list<'a>(&'a self) -> Option<&'a [Gc<MetaItem>]>;
@@ -420,18 +418,16 @@ pub fn require_unique_names(diagnostic: &SpanHandler, metas: &[Gc<MetaItem>]) {
 }
 
 
-/**
- * Fold this over attributes to parse #[repr(...)] forms.
- *
- * Valid repr contents: any of the primitive integral type names (see
- * `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
- * the same discriminant size that the corresponding C enum would. These are
- * not allowed on univariant or zero-variant enums, which have no discriminant.
- *
- * If a discriminant type is so specified, then the discriminant will be
- * present (before fields, if any) with that type; representation
- * optimizations which would remove it will not be done.
- */
+/// Fold this over attributes to parse #[repr(...)] forms.
+///
+/// Valid repr contents: any of the primitive integral type names (see
+/// `int_type_of_word`, below) to specify the discriminant type; and `C`, to use
+/// the same discriminant size that the corresponding C enum would. These are
+/// not allowed on univariant or zero-variant enums, which have no discriminant.
+///
+/// If a discriminant type is so specified, then the discriminant will be
+/// present (before fields, if any) with that type; representation
+/// optimizations which would remove it will not be done.
 pub fn find_repr_attr(diagnostic: &SpanHandler, attr: &Attribute, acc: ReprAttr)
                      -> ReprAttr {
     let mut acc = acc;
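The `#[repr(...)]` forms this function parses are still current Rust. A small sketch of a discriminant-typed enum and a C-compatible one:

```rust
// #[repr(u8)] pins the discriminant type to one byte; #[repr(C)] uses the
// size the corresponding C enum would have.
#[repr(u8)]
enum Opcode {
    Nop = 0x00,
    Load = 0x01,
    Store = 0x02,
}

#[repr(C)]
enum Status {
    Ok,
    Error,
}

fn main() {
    assert_eq!(std::mem::size_of::<Opcode>(), 1);
    assert_eq!(Opcode::Nop as u8, 0x00);
    assert_eq!(Opcode::Load as u8 + Opcode::Store as u8, 0x03);
    let _ = Status::Ok;
    let _ = Status::Error;
}
```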
src/libsyntax/codemap.rs

@@ -252,15 +252,15 @@ pub struct FileMap {
 }
 
 impl FileMap {
-    // EFFECT: register a start-of-line offset in the
-    // table of line-beginnings.
-    // UNCHECKED INVARIANT: these offsets must be added in the right
-    // order and must be in the right places; there is shared knowledge
-    // about what ends a line between this file and parse.rs
-    // WARNING: pos param here is the offset relative to start of CodeMap,
-    // and CodeMap will append a newline when adding a filemap without a newline at the end,
-    // so the safe way to call this is with value calculated as
-    // filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
+    /// EFFECT: register a start-of-line offset in the
+    /// table of line-beginnings.
+    /// UNCHECKED INVARIANT: these offsets must be added in the right
+    /// order and must be in the right places; there is shared knowledge
+    /// about what ends a line between this file and parse.rs
+    /// WARNING: pos param here is the offset relative to start of CodeMap,
+    /// and CodeMap will append a newline when adding a filemap without a newline at the end,
+    /// so the safe way to call this is with value calculated as
+    /// filemap.start_pos + newline_offset_relative_to_the_start_of_filemap.
     pub fn next_line(&self, pos: BytePos) {
         // the new charpos must be > the last one (or it's the first one).
         let mut lines = self.lines.borrow_mut();;
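The line-beginnings table that `next_line` fills is what makes byte-offset-to-line lookups cheap: line-start offsets are appended in increasing order, so locating the line containing an offset is a binary search. A minimal sketch of the idea (a standalone illustration, not the libsyntax API):

```rust
/// Minimal sketch of a line-beginnings table: `lines[i]` is the byte offset
/// at which line i starts, appended in increasing order as in `next_line`.
struct LineTable {
    lines: Vec<usize>,
}

impl LineTable {
    fn new(src: &str) -> LineTable {
        let mut lines = vec![0];
        for (i, b) in src.bytes().enumerate() {
            if b == b'\n' {
                lines.push(i + 1); // the next line starts after the newline
            }
        }
        LineTable { lines }
    }

    /// Binary-search the sorted offsets for the line containing `pos`.
    fn line_of(&self, pos: usize) -> usize {
        match self.lines.binary_search(&pos) {
            Ok(i) => i,
            Err(i) => i - 1,
        }
    }
}

fn main() {
    let t = LineTable::new("fn main() {\n    let x = 1;\n}\n");
    assert_eq!(t.line_of(0), 0);
    assert_eq!(t.line_of(15), 1); // offset 15 falls inside the second line
}
```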
@@ -269,7 +269,7 @@ impl FileMap {
         lines.push(pos);
     }
 
-    // get a line from the list of pre-computed line-beginnings
+    /// get a line from the list of pre-computed line-beginnings
     pub fn get_line(&self, line: int) -> String {
         let mut lines = self.lines.borrow_mut();
         let begin: BytePos = *lines.get(line as uint) - self.start_pos;
@@ -428,7 +428,7 @@ impl CodeMap {
         FileMapAndBytePos {fm: fm, pos: offset}
     }
 
-    // Converts an absolute BytePos to a CharPos relative to the filemap and above.
+    /// Converts an absolute BytePos to a CharPos relative to the filemap and above.
     pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
         debug!("codemap: converting {:?} to char pos", bpos);
         let idx = self.lookup_filemap_idx(bpos);
src/libsyntax/diagnostic.rs

@@ -21,7 +21,7 @@ use std::string::String;
 use term::WriterWrapper;
 use term;
 
-// maximum number of lines we will print for each error; arbitrary.
+/// maximum number of lines we will print for each error; arbitrary.
 static MAX_LINES: uint = 6u;
 
 #[deriving(Clone)]
@@ -73,9 +73,9 @@ pub struct FatalError;
 /// or `.span_bug` rather than a failed assertion, etc.
 pub struct ExplicitBug;
 
-// a span-handler is like a handler but also
-// accepts span information for source-location
-// reporting.
+/// A span-handler is like a handler but also
+/// accepts span information for source-location
+/// reporting.
 pub struct SpanHandler {
     pub handler: Handler,
     pub cm: codemap::CodeMap,
@@ -114,9 +114,9 @@ impl SpanHandler {
     }
 }
 
-// a handler deals with errors; certain errors
-// (fatal, bug, unimpl) may cause immediate exit,
-// others log errors for later reporting.
+/// A handler deals with errors; certain errors
+/// (fatal, bug, unimpl) may cause immediate exit,
+/// others log errors for later reporting.
 pub struct Handler {
     err_count: Cell<uint>,
     emit: RefCell<Box<Emitter + Send>>,
@@ -442,12 +442,12 @@ fn highlight_lines(err: &mut EmitterWriter,
     Ok(())
 }
 
-// Here are the differences between this and the normal `highlight_lines`:
-// `custom_highlight_lines` will always put arrow on the last byte of the
-// span (instead of the first byte). Also, when the span is too long (more
-// than 6 lines), `custom_highlight_lines` will print the first line, then
-// dot dot dot, then last line, whereas `highlight_lines` prints the first
-// six lines.
+/// Here are the differences between this and the normal `highlight_lines`:
+/// `custom_highlight_lines` will always put arrow on the last byte of the
+/// span (instead of the first byte). Also, when the span is too long (more
+/// than 6 lines), `custom_highlight_lines` will print the first line, then
+/// dot dot dot, then last line, whereas `highlight_lines` prints the first
+/// six lines.
 fn custom_highlight_lines(w: &mut EmitterWriter,
                           cm: &codemap::CodeMap,
                           sp: Span,
src/libsyntax/ext/base.rs

@@ -278,9 +278,9 @@ pub enum SyntaxExtension {
 pub type NamedSyntaxExtension = (Name, SyntaxExtension);
 
 pub struct BlockInfo {
-    // should macros escape from this scope?
+    /// Should macros escape from this scope?
     pub macros_escape: bool,
-    // what are the pending renames?
+    /// What are the pending renames?
     pub pending_renames: mtwt::RenameList,
 }
 
@@ -293,8 +293,8 @@ impl BlockInfo {
     }
 }
 
-// The base map of methods for expanding syntax extension
-// AST nodes into full ASTs
+/// The base map of methods for expanding syntax extension
+/// AST nodes into full ASTs
 pub fn syntax_expander_table() -> SyntaxEnv {
     // utility function to simplify creating NormalTT syntax extensions
     fn builtin_normal_expander(f: MacroExpanderFn) -> SyntaxExtension {
@@ -398,9 +398,9 @@ pub fn syntax_expander_table() -> SyntaxEnv {
     syntax_expanders
 }
 
-// One of these is made during expansion and incrementally updated as we go;
-// when a macro expansion occurs, the resulting nodes have the backtrace()
-// -> expn_info of their expansion context stored into their span.
+/// One of these is made during expansion and incrementally updated as we go;
+/// when a macro expansion occurs, the resulting nodes have the backtrace()
+/// -> expn_info of their expansion context stored into their span.
 pub struct ExtCtxt<'a> {
     pub parse_sess: &'a parse::ParseSess,
     pub cfg: ast::CrateConfig,
@@ -612,11 +612,11 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt,
     Some(es)
 }
 
-// in order to have some notion of scoping for macros,
-// we want to implement the notion of a transformation
-// environment.
+/// In order to have some notion of scoping for macros,
+/// we want to implement the notion of a transformation
+/// environment.
 
-// This environment maps Names to SyntaxExtensions.
+/// This environment maps Names to SyntaxExtensions.
 
 //impl question: how to implement it? Initially, the
 // env will contain only macros, so it might be painful
@@ -633,7 +633,6 @@ struct MapChainFrame {
     map: HashMap<Name, SyntaxExtension>,
 }
 
-// Only generic to make it easy to test
 pub struct SyntaxEnv {
     chain: Vec<MapChainFrame> ,
 }
src/libsyntax/ext/deriving/encodable.rs

@@ -8,79 +8,76 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-/*!
-
-The compiler code necessary to implement the `#[deriving(Encodable)]`
-(and `Decodable`, in decodable.rs) extension. The idea here is that
-type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`.
-
-For example, a type like:
-
-```ignore
-#[deriving(Encodable, Decodable)]
-struct Node { id: uint }
-```
-
-would generate two implementations like:
-
-```ignore
-impl<S:serialize::Encoder> Encodable<S> for Node {
-    fn encode(&self, s: &S) {
-        s.emit_struct("Node", 1, || {
-            s.emit_field("id", 0, || s.emit_uint(self.id))
-        })
-    }
-}
-
-impl<D:Decoder> Decodable for node_id {
-    fn decode(d: &D) -> Node {
-        d.read_struct("Node", 1, || {
-            Node {
-                id: d.read_field("x".to_string(), 0, || decode(d))
-            }
-        })
-    }
-}
-```
-
-Other interesting scenarios are when the item has type parameters or
-references other non-built-in types. A type definition like:
-
-```ignore
-#[deriving(Encodable, Decodable)]
-struct spanned<T> { node: T, span: Span }
-```
-
-would yield functions like:
-
-```ignore
-impl<
-    S: Encoder,
-    T: Encodable<S>
-> spanned<T>: Encodable<S> {
-    fn encode<S:Encoder>(s: &S) {
-        s.emit_rec(|| {
-            s.emit_field("node", 0, || self.node.encode(s));
-            s.emit_field("span", 1, || self.span.encode(s));
-        })
-    }
-}
-
-impl<
-    D: Decoder,
-    T: Decodable<D>
-> spanned<T>: Decodable<D> {
-    fn decode(d: &D) -> spanned<T> {
-        d.read_rec(|| {
-            {
-                node: d.read_field("node".to_string(), 0, || decode(d)),
-                span: d.read_field("span".to_string(), 1, || decode(d)),
-            }
-        })
-    }
-}
-```
-*/
+//! The compiler code necessary to implement the `#[deriving(Encodable)]`
+//! (and `Decodable`, in decodable.rs) extension. The idea here is that
+//! type-defining items may be tagged with `#[deriving(Encodable, Decodable)]`.
+//!
+//! For example, a type like:
+//!
+//! ```ignore
+//! #[deriving(Encodable, Decodable)]
+//! struct Node { id: uint }
+//! ```
+//!
+//! would generate two implementations like:
+//!
+//! ```ignore
+//! impl<S:serialize::Encoder> Encodable<S> for Node {
+//!     fn encode(&self, s: &S) {
+//!         s.emit_struct("Node", 1, || {
+//!             s.emit_field("id", 0, || s.emit_uint(self.id))
+//!         })
+//!     }
+//! }
+//!
+//! impl<D:Decoder> Decodable for node_id {
+//!     fn decode(d: &D) -> Node {
+//!         d.read_struct("Node", 1, || {
+//!             Node {
+//!                 id: d.read_field("x".to_string(), 0, || decode(d))
+//!             }
+//!         })
+//!     }
+//! }
+//! ```
+//!
+//! Other interesting scenarios are when the item has type parameters or
+//! references other non-built-in types. A type definition like:
+//!
+//! ```ignore
+//! #[deriving(Encodable, Decodable)]
+//! struct spanned<T> { node: T, span: Span }
+//! ```
+//!
+//! would yield functions like:
+//!
+//! ```ignore
+//! impl<
+//!     S: Encoder,
+//!     T: Encodable<S>
+//! > spanned<T>: Encodable<S> {
+//!     fn encode<S:Encoder>(s: &S) {
+//!         s.emit_rec(|| {
+//!             s.emit_field("node", 0, || self.node.encode(s));
+//!             s.emit_field("span", 1, || self.span.encode(s));
+//!         })
+//!     }
+//! }
+//!
+//! impl<
+//!     D: Decoder,
+//!     T: Decodable<D>
+//! > spanned<T>: Decodable<D> {
+//!     fn decode(d: &D) -> spanned<T> {
+//!         d.read_rec(|| {
+//!             {
+//!                 node: d.read_field("node".to_string(), 0, || decode(d)),
+//!                 span: d.read_field("span".to_string(), 1, || decode(d)),
+//!             }
+//!         })
+//!     }
+//! }
+//! ```
 
 use ast::{MetaItem, Item, Expr, ExprRet, MutMutable, LitNil};
 use codemap::Span;
@ -8,174 +8,170 @@
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
/*!
|
||||
|
||||
Some code that abstracts away much of the boilerplate of writing
|
||||
`deriving` instances for traits. Among other things it manages getting
|
||||
access to the fields of the 4 different sorts of structs and enum
|
||||
variants, as well as creating the method and impl ast instances.
|
||||
|
||||
Supported features (fairly exhaustive):
|
||||
|
||||
- Methods taking any number of parameters of any type, and returning
|
||||
any type, other than vectors, bottom and closures.
|
||||
- Generating `impl`s for types with type parameters and lifetimes
|
||||
(e.g. `Option<T>`), the parameters are automatically given the
|
||||
current trait as a bound. (This includes separate type parameters
|
||||
and lifetimes for methods.)
|
||||
- Additional bounds on the type parameters, e.g. the `Ord` instance
|
||||
requires an explicit `PartialEq` bound at the
|
||||
moment. (`TraitDef.additional_bounds`)
|
||||
|
||||
Unsupported: FIXME #6257: calling methods on reference fields,
|
||||
e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`,
|
||||
because of how the auto-dereferencing happens.
|
||||
|
||||
The most important thing for implementers is the `Substructure` and
|
||||
`SubstructureFields` objects. The latter groups 5 possibilities of the
|
||||
arguments:
|
||||
|
||||
- `Struct`, when `Self` is a struct (including tuple structs, e.g
|
||||
`struct T(int, char)`).
|
||||
- `EnumMatching`, when `Self` is an enum and all the arguments are the
|
||||
same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`)
|
||||
- `EnumNonMatching` when `Self` is an enum and the arguments are not
|
||||
the same variant (e.g. `None`, `Some(1)` and `None`). If
|
||||
`const_nonmatching` is true, this will contain an empty list.
|
||||
- `StaticEnum` and `StaticStruct` for static methods, where the type
|
||||
being derived upon is either an enum or struct respectively. (Any
|
||||
argument with type Self is just grouped among the non-self
|
||||
arguments.)
|
||||
|
||||
In the first two cases, the values from the corresponding fields in
|
||||
all the arguments are grouped together. In the `EnumNonMatching` case
|
||||
this isn't possible (different variants have different fields), so the
|
||||
fields are grouped by which argument they come from. There are no
|
||||
fields with values in the static cases, so these are treated entirely
|
||||
differently.
|
||||
|
||||
The non-static cases have `Option<ident>` in several places associated
|
||||
with field `expr`s. This represents the name of the field it is
|
||||
associated with. It is only not `None` when the associated field has
|
||||
an identifier in the source code. For example, the `x`s in the
|
||||
following snippet
|
||||
|
||||
```rust
|
||||
struct A { x : int }
|
||||
|
||||
struct B(int);
|
||||
|
||||
enum C {
|
||||
C0(int),
|
||||
C1 { x: int }
|
||||
}
|
||||
```
|
||||
|
||||
The `int`s in `B` and `C0` don't have an identifier, so the
|
||||
`Option<ident>`s would be `None` for them.
|
||||
|
||||
In the static cases, the structure is summarised, either into the just
|
||||
spans of the fields or a list of spans and the field idents (for tuple
|
||||
structs and record structs, respectively), or a list of these, for
|
||||
enums (one for each variant). For empty struct and empty enum
|
||||
variants, it is represented as a count of 0.
|
||||
|
||||
# Examples
|
||||
|
||||
The following simplified `PartialEq` is used for in-code examples:
|
||||
|
||||
```rust
|
||||
trait PartialEq {
|
||||
fn eq(&self, other: &Self);
|
||||
}
|
||||
impl PartialEq for int {
|
||||
fn eq(&self, other: &int) -> bool {
|
||||
*self == *other
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Some examples of the values of `SubstructureFields` follow, using the
|
||||
above `PartialEq`, `A`, `B` and `C`.
|
||||
|
||||
## Structs
|
||||
|
||||
When generating the `expr` for the `A` impl, the `SubstructureFields` is
|
||||
|
||||
~~~text
|
||||
Struct(~[FieldInfo {
|
||||
span: <span of x>
|
||||
name: Some(<ident of x>),
|
||||
self_: <expr for &self.x>,
|
||||
other: ~[<expr for &other.x]
|
||||
}])
|
||||
~~~
|
||||
|
||||
For the `B` impl, called with `B(a)` and `B(b)`,
|
||||
|
||||
~~~text
|
||||
Struct(~[FieldInfo {
|
||||
span: <span of `int`>,
|
||||
name: None,
|
||||
<expr for &a>
|
||||
~[<expr for &b>]
|
||||
}])
|
||||
~~~
|
||||
|
||||
## Enums
|
||||
|
||||
When generating the `expr` for a call with `self == C0(a)` and `other
|
||||
== C0(b)`, the SubstructureFields is
|
||||
|
||||
~~~text
|
||||
EnumMatching(0, <ast::Variant for C0>,
|
||||
~[FieldInfo {
|
||||
span: <span of int>
|
||||
name: None,
|
||||
self_: <expr for &a>,
|
||||
other: ~[<expr for &b>]
|
||||
}])
|
||||
~~~
|
||||
|
||||
For `C1 {x}` and `C1 {x}`,
|
||||
|
||||
~~~text
|
||||
EnumMatching(1, <ast::Variant for C1>,
|
||||
~[FieldInfo {
|
||||
span: <span of x>
|
||||
name: Some(<ident of x>),
|
||||
self_: <expr for &self.x>,
|
||||
other: ~[<expr for &other.x>]
|
||||
}])
|
||||
~~~
|
||||
|
||||
For `C0(a)` and `C1 {x}` ,
|
||||
|
||||
~~~text
|
||||
EnumNonMatching(~[(0, <ast::Variant for B0>,
|
||||
~[(<span of int>, None, <expr for &a>)]),
|
||||
(1, <ast::Variant for B1>,
|
||||
~[(<span of x>, Some(<ident of x>),
|
||||
<expr for &other.x>)])])
|
||||
~~~
|
||||
|
||||
(and vice versa, but with the order of the outermost list flipped.)
|
||||
|
||||
## Static
|
||||
|
||||
A static method on the above would result in,
|
||||
|
||||
~~~text
|
||||
StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)]))
|
||||
|
||||
StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of x>]))
|
||||
|
||||
StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])),
|
||||
(<ident of C1>, <span of C1>,
|
||||
Named(~[(<ident of x>, <span of x>)]))])
|
||||
~~~
|
||||
|
||||
*/
|
||||
//! Some code that abstracts away much of the boilerplate of writing
|
||||
//! `deriving` instances for traits. Among other things it manages getting
|
||||
//! access to the fields of the 4 different sorts of structs and enum
|
||||
//! variants, as well as creating the method and impl ast instances.
|
||||
//!
|
||||
//! Supported features (fairly exhaustive):
|
||||
//!
|
||||
//! - Methods taking any number of parameters of any type, and returning
|
||||
//! any type, other than vectors, bottom and closures.
|
||||
//! - Generating `impl`s for types with type parameters and lifetimes
|
||||
//! (e.g. `Option<T>`), the parameters are automatically given the
|
||||
//! current trait as a bound. (This includes separate type parameters
|
||||
//! and lifetimes for methods.)
|
||||
//! - Additional bounds on the type parameters, e.g. the `Ord` instance
//!   requires an explicit `PartialEq` bound at the
//!   moment. (`TraitDef.additional_bounds`)
//!
//! Unsupported: FIXME #6257: calling methods on reference fields,
//! e.g. deriving Eq/Ord/Clone don't work on `struct A(&int)`,
//! because of how the auto-dereferencing happens.
//!
//! The most important thing for implementers is the `Substructure` and
//! `SubstructureFields` objects. The latter groups 5 possibilities of the
//! arguments:
//!
//! - `Struct`, when `Self` is a struct (including tuple structs, e.g.
//!   `struct T(int, char)`).
//! - `EnumMatching`, when `Self` is an enum and all the arguments are the
//!   same variant of the enum (e.g. `Some(1)`, `Some(3)` and `Some(4)`)
//! - `EnumNonMatching` when `Self` is an enum and the arguments are not
//!   the same variant (e.g. `None`, `Some(1)` and `None`). If
//!   `const_nonmatching` is true, this will contain an empty list.
//! - `StaticEnum` and `StaticStruct` for static methods, where the type
//!   being derived upon is either an enum or struct respectively. (Any
//!   argument with type Self is just grouped among the non-self
//!   arguments.)
//!
//! In the first two cases, the values from the corresponding fields in
//! all the arguments are grouped together. In the `EnumNonMatching` case
//! this isn't possible (different variants have different fields), so the
//! fields are grouped by which argument they come from. There are no
//! fields with values in the static cases, so these are treated entirely
//! differently.
//!
//! The non-static cases have `Option<ident>` in several places associated
//! with field `expr`s. This represents the name of the field it is
//! associated with. It is only not `None` when the associated field has
//! an identifier in the source code. For example, the `x`s in the
//! following snippet
//!
//! ```rust
//! struct A { x: int }
//!
//! struct B(int);
//!
//! enum C {
//!     C0(int),
//!     C1 { x: int }
//! }
//! ```
//!
//! The `int`s in `B` and `C0` don't have an identifier, so the
//! `Option<ident>`s would be `None` for them.
//!
//! In the static cases, the structure is summarised into either just the
//! spans of the fields or a list of spans and the field idents (for tuple
//! structs and record structs, respectively), or a list of these, for
//! enums (one for each variant). For empty struct and empty enum
//! variants, it is represented as a count of 0.
//!
//! # Examples
//!
//! The following simplified `PartialEq` is used for in-code examples:
//!
//! ```rust
//! trait PartialEq {
//!     fn eq(&self, other: &Self) -> bool;
//! }
//! impl PartialEq for int {
//!     fn eq(&self, other: &int) -> bool {
//!         *self == *other
//!     }
//! }
//! ```
//!
//! Some examples of the values of `SubstructureFields` follow, using the
//! above `PartialEq`, `A`, `B` and `C`.
//!
//! ## Structs
//!
//! When generating the `expr` for the `A` impl, the `SubstructureFields` is
//!
//! ~~~text
//! Struct(~[FieldInfo {
//!            span: <span of x>,
//!            name: Some(<ident of x>),
//!            self_: <expr for &self.x>,
//!            other: ~[<expr for &other.x>]
//!          }])
//! ~~~
//!
//! For the `B` impl, called with `B(a)` and `B(b)`,
//!
//! ~~~text
//! Struct(~[FieldInfo {
//!            span: <span of `int`>,
//!            name: None,
//!            self_: <expr for &a>,
//!            other: ~[<expr for &b>]
//!          }])
//! ~~~
//!
//! ## Enums
//!
//! When generating the `expr` for a call with `self == C0(a)` and `other
//! == C0(b)`, the SubstructureFields is
//!
//! ~~~text
//! EnumMatching(0, <ast::Variant for C0>,
//!              ~[FieldInfo {
//!                 span: <span of int>,
//!                 name: None,
//!                 self_: <expr for &a>,
//!                 other: ~[<expr for &b>]
//!               }])
//! ~~~
//!
//! For `C1 {x}` and `C1 {x}`,
//!
//! ~~~text
//! EnumMatching(1, <ast::Variant for C1>,
//!              ~[FieldInfo {
//!                 span: <span of x>,
//!                 name: Some(<ident of x>),
//!                 self_: <expr for &self.x>,
//!                 other: ~[<expr for &other.x>]
//!               }])
//! ~~~
//!
//! For `C0(a)` and `C1 {x}`,
//!
//! ~~~text
//! EnumNonMatching(~[(0, <ast::Variant for B0>,
//!                    ~[(<span of int>, None, <expr for &a>)]),
//!                   (1, <ast::Variant for B1>,
//!                    ~[(<span of x>, Some(<ident of x>),
//!                       <expr for &other.x>)])])
//! ~~~
//!
//! (and vice versa, but with the order of the outermost list flipped.)
//!
//! ## Static
//!
//! A static method on the above would result in,
//!
//! ~~~text
//! StaticStruct(<ast::StructDef of A>, Named(~[(<ident of x>, <span of x>)]))
//!
//! StaticStruct(<ast::StructDef of B>, Unnamed(~[<span of x>]))
//!
//! StaticEnum(<ast::EnumDef of C>, ~[(<ident of C0>, <span of C0>, Unnamed(~[<span of int>])),
//!                                   (<ident of C1>, <span of C1>,
//!                                    Named(~[(<ident of x>, <span of x>)]))])
//! ~~~
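
To make the five groupings concrete, here is a minimal, self-contained sketch in present-day Rust. The reduced `Fields` enum and `describe` function are hypothetical stand-ins for the real `SubstructureFields`/`FieldInfo` types, showing only how a deriving implementation dispatches on the argument grouping:

```rust
// Reduced, hypothetical model of `SubstructureFields`: one entry per field,
// with `None` standing for an unnamed (tuple) field.
enum Fields {
    Struct(Vec<Option<String>>),              // `Self` is a struct
    EnumMatching(usize, Vec<Option<String>>), // all args are this variant
    EnumNonMatching,                          // args are different variants
    StaticStruct(usize),                      // static method: field count only
}

fn describe(fields: &Fields) -> String {
    match fields {
        Fields::Struct(names) => format!("struct with {} fields", names.len()),
        Fields::EnumMatching(idx, names) => {
            format!("all arguments are variant #{} with {} fields", idx, names.len())
        }
        Fields::EnumNonMatching => String::from("arguments are different variants"),
        Fields::StaticStruct(n) => format!("static method on a struct with {} fields", n),
    }
}

fn main() {
    // `C1 { x }` from the snippet above: variant #1, one named field `x`.
    let c1 = Fields::EnumMatching(1, vec![Some(String::from("x"))]);
    println!("{}", describe(&c1));
}
```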

use std::cell::RefCell;
use std::gc::{Gc, GC};

@ -25,8 +25,10 @@ use std::gc::Gc;

/// The types of pointers
pub enum PtrTy<'a> {
    Send, // ~
    Borrowed(Option<&'a str>, ast::Mutability), // &['lifetime] [mut]
    /// ~
    Send,
    /// &'lifetime mut
    Borrowed(Option<&'a str>, ast::Mutability),
}

/// A path, e.g. `::std::option::Option::<int>` (global). Has support

@ -83,12 +85,12 @@ impl<'a> Path<'a> {
/// A type. Supports pointers (except for *), Self, and literals
pub enum Ty<'a> {
    Self,
    // &/Box/ Ty
    /// &/Box/ Ty
    Ptr(Box<Ty<'a>>, PtrTy<'a>),
    // mod::mod::Type<[lifetime], [Params...]>, including a plain type
    // parameter, and things like `int`
    /// mod::mod::Type<[lifetime], [Params...]>, including a plain type
    /// parameter, and things like `int`
    Literal(Path<'a>),
    // includes nil
    /// includes unit
    Tuple(Vec<Ty<'a>>)
}

@ -55,8 +55,8 @@ pub fn expand_deriving_show(cx: &mut ExtCtxt,
    trait_def.expand(cx, mitem, item, push)
}

// we construct a format string and then defer to std::fmt, since that
// knows what's up with formatting at so on.
/// We construct a format string and then defer to std::fmt, since that
/// knows what's up with formatting and so on.
fn show_substructure(cx: &mut ExtCtxt, span: Span,
                     substr: &Substructure) -> Gc<Expr> {
    // build `<name>`, `<name>({}, {}, ...)` or `<name> { <field>: {},

@ -246,11 +246,11 @@ pub fn expand_expr(e: Gc<ast::Expr>, fld: &mut MacroExpander) -> Gc<ast::Expr> {
    }
}

// Rename loop label and expand its loop body
//
// The renaming procedure for loop is different in the sense that the loop
// body is in a block enclosed by loop head so the renaming of loop label
// must be propagated to the enclosed context.
/// Rename loop label and expand its loop body
///
/// The renaming procedure for loop is different in the sense that the loop
/// body is in a block enclosed by loop head so the renaming of loop label
/// must be propagated to the enclosed context.
fn expand_loop_block(loop_block: P<Block>,
                     opt_ident: Option<Ident>,
                     fld: &mut MacroExpander) -> (P<Block>, Option<Ident>) {

@ -37,24 +37,24 @@ struct Context<'a, 'b> {
    ecx: &'a mut ExtCtxt<'b>,
    fmtsp: Span,

    // Parsed argument expressions and the types that we've found so far for
    // them.
    /// Parsed argument expressions and the types that we've found so far for
    /// them.
    args: Vec<Gc<ast::Expr>>,
    arg_types: Vec<Option<ArgumentType>>,
    // Parsed named expressions and the types that we've found for them so far.
    // Note that we keep a side-array of the ordering of the named arguments
    // found to be sure that we can translate them in the same order that they
    // were declared in.
    /// Parsed named expressions and the types that we've found for them so far.
    /// Note that we keep a side-array of the ordering of the named arguments
    /// found to be sure that we can translate them in the same order that they
    /// were declared in.
    names: HashMap<String, Gc<ast::Expr>>,
    name_types: HashMap<String, ArgumentType>,
    name_ordering: Vec<String>,

    // Collection of the compiled `rt::Piece` structures
    /// Collection of the compiled `rt::Piece` structures
    pieces: Vec<Gc<ast::Expr>>,
    name_positions: HashMap<String, uint>,
    method_statics: Vec<Gc<ast::Item>>,

    // Updated as arguments are consumed or methods are entered
    /// Updated as arguments are consumed or methods are entered
    nest_level: uint,
    next_arg: uint,
}
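
The reason for the `name_ordering` side-array documented above is that a hash map alone loses declaration order. A hypothetical reduction (plain `u32` values instead of the real expression types) of that bookkeeping, in present-day Rust:

```rust
use std::collections::HashMap;

fn main() {
    let mut names: HashMap<String, u32> = HashMap::new();
    let mut name_ordering: Vec<String> = Vec::new();

    for (name, val) in [("width", 8), ("prec", 2), ("pad", 0)] {
        if !names.contains_key(name) {
            name_ordering.push(name.to_string()); // remember declaration order
        }
        names.insert(name.to_string(), val);
    }

    // Iterating the map directly would yield an unspecified order;
    // the side-array restores the order the arguments were declared in.
    for name in &name_ordering {
        println!("{} = {}", name, names[name]);
    }
}
```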

@ -21,16 +21,16 @@ use std::cell::RefCell;
use std::rc::Rc;
use std::collections::HashMap;

// the SCTable contains a table of SyntaxContext_'s. It
// represents a flattened tree structure, to avoid having
// managed pointers everywhere (that caused an ICE).
// the mark_memo and rename_memo fields are side-tables
// that ensure that adding the same mark to the same context
// gives you back the same context as before. This shouldn't
// change the semantics--everything here is immutable--but
// it should cut down on memory use *a lot*; applying a mark
// to a tree containing 50 identifiers would otherwise generate
// 50 new contexts
/// The SCTable contains a table of SyntaxContext_'s. It
/// represents a flattened tree structure, to avoid having
/// managed pointers everywhere (that caused an ICE).
/// the mark_memo and rename_memo fields are side-tables
/// that ensure that adding the same mark to the same context
/// gives you back the same context as before. This shouldn't
/// change the semantics--everything here is immutable--but
/// it should cut down on memory use *a lot*; applying a mark
/// to a tree containing 50 identifiers would otherwise generate
/// 50 new contexts
pub struct SCTable {
    table: RefCell<Vec<SyntaxContext_>>,
    mark_memo: RefCell<HashMap<(SyntaxContext,Mrk),SyntaxContext>>,

@ -41,16 +41,16 @@ pub struct SCTable {
pub enum SyntaxContext_ {
    EmptyCtxt,
    Mark (Mrk,SyntaxContext),
    // flattening the name and syntaxcontext into the rename...
    // HIDDEN INVARIANTS:
    // 1) the first name in a Rename node
    // can only be a programmer-supplied name.
    // 2) Every Rename node with a given Name in the
    // "to" slot must have the same name and context
    // in the "from" slot. In essence, they're all
    // pointers to a single "rename" event node.
    /// flattening the name and syntaxcontext into the rename...
    /// HIDDEN INVARIANTS:
    /// 1) the first name in a Rename node
    ///    can only be a programmer-supplied name.
    /// 2) Every Rename node with a given Name in the
    ///    "to" slot must have the same name and context
    ///    in the "from" slot. In essence, they're all
    ///    pointers to a single "rename" event node.
    Rename (Ident,Name,SyntaxContext),
    // actually, IllegalCtxt may not be necessary.
    /// actually, IllegalCtxt may not be necessary.
    IllegalCtxt
}

@ -62,7 +62,7 @@ pub fn apply_mark(m: Mrk, ctxt: SyntaxContext) -> SyntaxContext {
    with_sctable(|table| apply_mark_internal(m, ctxt, table))
}

// Extend a syntax context with a given mark and sctable (explicit memoization)
/// Extend a syntax context with a given mark and sctable (explicit memoization)
fn apply_mark_internal(m: Mrk, ctxt: SyntaxContext, table: &SCTable) -> SyntaxContext {
    let key = (ctxt, m);
    let new_ctxt = |_: &(SyntaxContext, Mrk)|
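
The "explicit memoization" that `apply_mark_internal` documents can be sketched in isolation. This is a hypothetical reduction in present-day Rust (contexts and marks as plain integers, a `Vec` standing in for the flattened context table):

```rust
use std::collections::HashMap;

type Ctxt = u32;
type Mark = u32;

// Look the (ctxt, mark) pair up in a memo table, and only allocate a fresh
// table entry on a miss, so applying the same mark to the same context
// always yields the same context (and no duplicate entries).
fn apply_mark_memoized(m: Mark, ctxt: Ctxt,
                       table: &mut Vec<(Mark, Ctxt)>,
                       memo: &mut HashMap<(Ctxt, Mark), Ctxt>) -> Ctxt {
    *memo.entry((ctxt, m)).or_insert_with(|| {
        table.push((m, ctxt));
        (table.len() - 1) as Ctxt // index of the new entry, as in idx_push
    })
}

fn main() {
    let (mut table, mut memo) = (Vec::new(), HashMap::new());
    let a = apply_mark_memoized(7, 0, &mut table, &mut memo);
    let b = apply_mark_memoized(7, 0, &mut table, &mut memo);
    assert_eq!(a, b); // same mark + same context => same context back
}
```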

@ -77,7 +77,7 @@ pub fn apply_rename(id: Ident, to:Name,
    with_sctable(|table| apply_rename_internal(id, to, ctxt, table))
}

// Extend a syntax context with a given rename and sctable (explicit memoization)
/// Extend a syntax context with a given rename and sctable (explicit memoization)
fn apply_rename_internal(id: Ident,
                         to: Name,
                         ctxt: SyntaxContext,

@ -141,7 +141,7 @@ pub fn clear_tables() {
    with_resolve_table_mut(|table| *table = HashMap::new());
}

// Add a value to the end of a vec, return its index
/// Add a value to the end of a vec, return its index
fn idx_push<T>(vec: &mut Vec<T>, val: T) -> u32 {
    vec.push(val);
    (vec.len() - 1) as u32

@ -173,8 +173,8 @@ fn with_resolve_table_mut<T>(op: |&mut ResolveTable| -> T) -> T {
    }
}

// Resolve a syntax object to a name, per MTWT.
// adding memoization to resolve 500+ seconds in resolve for librustc (!)
/// Resolve a syntax object to a name, per MTWT.
/// adding memoization to resolve 500+ seconds in resolve for librustc (!)
fn resolve_internal(id: Ident,
                    table: &SCTable,
                    resolve_table: &mut ResolveTable) -> Name {

@ -264,8 +264,8 @@ pub fn outer_mark(ctxt: SyntaxContext) -> Mrk {
    })
}

// Push a name... unless it matches the one on top, in which
// case pop and discard (so two of the same marks cancel)
/// Push a name... unless it matches the one on top, in which
/// case pop and discard (so two of the same marks cancel)
fn xor_push(marks: &mut Vec<Mrk>, mark: Mrk) {
    if (marks.len() > 0) && (*marks.last().unwrap() == mark) {
        marks.pop().unwrap();
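
The cancellation behaviour of `xor_push` is easiest to see standalone; a minimal sketch in present-day Rust with marks as plain `u32`:

```rust
// Pushing a mark equal to the current top cancels it instead of stacking it,
// so a mark applied twice in a row disappears entirely.
fn xor_push(marks: &mut Vec<u32>, mark: u32) {
    if marks.last() == Some(&mark) {
        marks.pop();
    } else {
        marks.push(mark);
    }
}

fn main() {
    let mut marks = Vec::new();
    xor_push(&mut marks, 1);
    xor_push(&mut marks, 2);
    xor_push(&mut marks, 2); // cancels the previous 2
    assert_eq!(marks, vec![1]);
}
```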

@ -28,7 +28,7 @@ use std::str;
// the column/row/filename of the expression, or they include
// a given file into the current one.

/* line!(): expands to the current line number */
/// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                   -> Box<base::MacResult> {
    base::check_zero_tts(cx, sp, tts, "line!");

@ -49,9 +49,9 @@ pub fn expand_col(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
    base::MacExpr::new(cx.expr_uint(topmost.call_site, loc.col.to_uint()))
}

/* file!(): expands to the current filename */
/* The filemap (`loc.file`) contains a bunch more information we could spit
 * out if we wanted. */
/// file!(): expands to the current filename
/// The filemap (`loc.file`) contains a bunch more information we could spit
/// out if we wanted.
pub fn expand_file(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                   -> Box<base::MacResult> {
    base::check_zero_tts(cx, sp, tts, "file!");

@ -82,9 +82,9 @@ pub fn expand_mod(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                  token::intern_and_get_ident(string.as_slice())))
}

// include! : parse the given file as an expr
// This is generally a bad idea because it's going to behave
// unhygienically.
/// include! : parse the given file as an expr
/// This is generally a bad idea because it's going to behave
/// unhygienically.
pub fn expand_include(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree])
                      -> Box<base::MacResult> {
    let file = match get_single_str_from_tts(cx, sp, tts, "include!") {

@ -8,7 +8,72
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Earley-like parser for macros.
//! This is an Earley-like parser, without support for in-grammar nonterminals:
//! it handles them only by calling out to the main rust parser for named
//! nonterminals (which it commits to fully when it hits one in a grammar).
//! This means that there are no
//! completer or predictor rules, and therefore no need to store one column per
//! token: instead, there's a set of current Earley items and a set of next
//! ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
//! pathological cases, is worse than traditional Earley parsing, but it's an
//! easier fit for Macro-by-Example-style rules, and I think the overhead is
//! lower. (In order to prevent the pathological case, we'd need to lazily
//! construct the resulting `NamedMatch`es at the very end. It'd be a pain,
//! and require more memory to keep around old items, but it would also save
//! overhead)
//!
//! Quick intro to how the parser works:
//!
//! A 'position' is a dot in the middle of a matcher, usually represented as a
//! dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.
//!
//! The parser walks through the input a character at a time, maintaining a list
//! of items consistent with the current position in the input string: `cur_eis`.
//!
//! As it processes them, it fills up `eof_eis` with items that would be valid if
//! the macro invocation is now over, `bb_eis` with items that are waiting on
//! a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
//! on a particular token. Most of the logic concerns moving the · through the
//! repetitions indicated by Kleene stars. It only advances or calls out to the
//! real Rust parser when no `cur_eis` items remain.
//!
//! Example: Start parsing `a a a a b` against [· a $( a )* a b].
//!
//! Remaining input: `a a a a b`
//! next_eis: [· a $( a )* a b]
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a a b`
//! cur: [a · $( a )* a b]
//! Descend/Skip (first item).
//! next: [a $( · a )* a b] [a $( a )* · a b].
//!
//! - - - Advance over an `a`. - - -
//!
//! Remaining input: `a a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `a b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
//!
//! - - - Advance over an `a`. - - - (this looks exactly like the last step)
//!
//! Remaining input: `b`
//! cur: [a $( a · )* a b] next: [a $( a )* a · b]
//! Finish/Repeat (first item)
//! next: [a $( a )* · a b] [a $( · a )* a b]
//!
//! - - - Advance over a `b`. - - -
//!
//! Remaining input: ``
//! eof: [a $( a )* a b ·]
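
The dot-advancing discipline in the walkthrough above can be modeled in a few lines. This is a hypothetical, star-free reduction in present-day Rust (positions as plain dot indices over a flat matcher), not the real matcher engine:

```rust
// Advancing over an input token keeps exactly the items whose next expected
// token matches, moving their dot one position to the right.
fn advance(matcher: &[char], cur_eis: &[usize], tok: char) -> Vec<usize> {
    cur_eis.iter()
        .filter(|&&dot| matcher.get(dot) == Some(&tok))
        .map(|&dot| dot + 1)
        .collect()
}

fn main() {
    let matcher = ['a', 'a', 'b'];
    let mut eis = vec![0]; // one item: `· a a b`
    for tok in ['a', 'a', 'b'] {
        eis = advance(&matcher, &eis, tok);
    }
    assert_eq!(eis, vec![3]); // dot past the end: `a a b ·`, a full match
}
```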

use ast;
use ast::{Matcher, MatchTok, MatchSeq, MatchNonterminal, Ident};

@ -25,75 +90,6 @@ use std::rc::Rc;
use std::gc::GC;
use std::collections::HashMap;

/* This is an Earley-like parser, without support for in-grammar nonterminals,
only by calling out to the main rust parser for named nonterminals (which it
commits to fully when it hits one in a grammar). This means that there are no
completer or predictor rules, and therefore no need to store one column per
token: instead, there's a set of current Earley items and a set of next
ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
pathological cases, is worse than traditional Earley parsing, but it's an
easier fit for Macro-by-Example-style rules, and I think the overhead is
lower. (In order to prevent the pathological case, we'd need to lazily
construct the resulting `NamedMatch`es at the very end. It'd be a pain,
and require more memory to keep around old items, but it would also save
overhead)*/

/* Quick intro to how the parser works:

A 'position' is a dot in the middle of a matcher, usually represented as a
dot. For example `· a $( a )* a b` is a position, as is `a $( · a )* a b`.

The parser walks through the input a character at a time, maintaining a list
of items consistent with the current position in the input string: `cur_eis`.

As it processes them, it fills up `eof_eis` with items that would be valid if
the macro invocation is now over, `bb_eis` with items that are waiting on
a Rust nonterminal like `$e:expr`, and `next_eis` with items that are waiting
on the a particular token. Most of the logic concerns moving the · through the
repetitions indicated by Kleene stars. It only advances or calls out to the
real Rust parser when no `cur_eis` items remain

Example: Start parsing `a a a a b` against [· a $( a )* a b].

Remaining input: `a a a a b`
next_eis: [· a $( a )* a b]

- - - Advance over an `a`. - - -

Remaining input: `a a a b`
cur: [a · $( a )* a b]
Descend/Skip (first item).
next: [a $( · a )* a b] [a $( a )* · a b].

- - - Advance over an `a`. - - -

Remaining input: `a a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]

- - - Advance over an `a`. - - - (this looks exactly like the last step)

Remaining input: `a b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]

- - - Advance over an `a`. - - - (this looks exactly like the last step)

Remaining input: `b`
cur: [a $( a · )* a b] next: [a $( a )* a · b]
Finish/Repeat (first item)
next: [a $( a )* · a b] [a $( · a )* a b]

- - - Advance over a `b`. - - -

Remaining input: ``
eof: [a $( a )* a b ·]

*/

/* to avoid costly uniqueness checks, we require that `MatchSeq` always has a
nonempty body. */

@ -147,24 +143,24 @@ pub fn initial_matcher_pos(ms: Vec<Matcher> , sep: Option<Token>, lo: BytePos)
    }
}

// NamedMatch is a pattern-match result for a single ast::MatchNonterminal:
// so it is associated with a single ident in a parse, and all
// MatchedNonterminal's in the NamedMatch have the same nonterminal type
// (expr, item, etc). All the leaves in a single NamedMatch correspond to a
// single matcher_nonterminal in the ast::Matcher that produced it.
//
// It should probably be renamed, it has more or less exact correspondence to
// ast::match nodes, and the in-memory structure of a particular NamedMatch
// represents the match that occurred when a particular subset of an
// ast::match -- those ast::Matcher nodes leading to a single
// MatchNonterminal -- was applied to a particular token tree.
//
// The width of each MatchedSeq in the NamedMatch, and the identity of the
// MatchedNonterminal's, will depend on the token tree it was applied to: each
// MatchedSeq corresponds to a single MatchSeq in the originating
// ast::Matcher. The depth of the NamedMatch structure will therefore depend
// only on the nesting depth of ast::MatchSeq's in the originating
// ast::Matcher it was derived from.
/// NamedMatch is a pattern-match result for a single ast::MatchNonterminal:
/// so it is associated with a single ident in a parse, and all
/// MatchedNonterminal's in the NamedMatch have the same nonterminal type
/// (expr, item, etc). All the leaves in a single NamedMatch correspond to a
/// single matcher_nonterminal in the ast::Matcher that produced it.
///
/// It should probably be renamed, it has more or less exact correspondence to
/// ast::match nodes, and the in-memory structure of a particular NamedMatch
/// represents the match that occurred when a particular subset of an
/// ast::match -- those ast::Matcher nodes leading to a single
/// MatchNonterminal -- was applied to a particular token tree.
///
/// The width of each MatchedSeq in the NamedMatch, and the identity of the
/// MatchedNonterminal's, will depend on the token tree it was applied to: each
/// MatchedSeq corresponds to a single MatchSeq in the originating
/// ast::Matcher. The depth of the NamedMatch structure will therefore depend
/// only on the nesting depth of ast::MatchSeq's in the originating
/// ast::Matcher it was derived from.

pub enum NamedMatch {
    MatchedSeq(Vec<Rc<NamedMatch>>, codemap::Span),

@ -224,7 +220,8 @@ pub fn parse_or_else(sess: &ParseSess,
    }
}

// perform a token equality check, ignoring syntax context (that is, an unhygienic comparison)
/// Perform a token equality check, ignoring syntax context (that is, an
/// unhygienic comparison)
pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool {
    match (t1,t2) {
        (&token::IDENT(id1,_),&token::IDENT(id2,_))

@ -119,7 +119,7 @@ impl MacResult for MacroRulesDefiner {
    }
}

// Given `lhses` and `rhses`, this is the new macro we create
/// Given `lhses` and `rhses`, this is the new macro we create
fn generic_extension(cx: &ExtCtxt,
                     sp: Span,
                     name: Ident,

@ -193,9 +193,9 @@ fn generic_extension(cx: &ExtCtxt,
    cx.span_fatal(best_fail_spot, best_fail_msg.as_slice());
}

// this procedure performs the expansion of the
// macro_rules! macro. It parses the RHS and adds
// an extension to the current context.
/// This procedure performs the expansion of the
/// macro_rules! macro. It parses the RHS and adds
/// an extension to the current context.
pub fn add_new_extension(cx: &mut ExtCtxt,
                         sp: Span,
                         name: Ident,

@ -32,7 +32,7 @@ struct TtFrame {
#[deriving(Clone)]
pub struct TtReader<'a> {
    pub sp_diag: &'a SpanHandler,
    // the unzipped tree:
    /// the unzipped tree:
    stack: Vec<TtFrame>,
    /* for MBE-style macro transcription */
    interpolations: HashMap<Ident, Rc<NamedMatch>>,

@ -43,9 +43,9 @@ pub struct TtReader<'a> {
    pub cur_span: Span,
}

/** This can do Macro-By-Example transcription. On the other hand, if
 * `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and
 * should) be none. */
/// This can do Macro-By-Example transcription. On the other hand, if
/// `src` contains no `TTSeq`s and `TTNonterminal`s, `interp` can (and
/// should) be none.
pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler,
                         interp: Option<HashMap<Ident, Rc<NamedMatch>>>,
                         src: Vec<ast::TokenTree>)

@ -138,8 +138,8 @@ fn lockstep_iter_size(t: &TokenTree, r: &TtReader) -> LockstepIterSize {
    }
}

// return the next token from the TtReader.
// EFFECT: advances the reader's token field
/// Return the next token from the TtReader.
/// EFFECT: advances the reader's token field
pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan {
    // FIXME(pcwalton): Bad copy?
    let ret_val = TokenAndSpan {

@ -8,15 +8,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!

The Rust parser and macro expander.

# Note

This API is completely unstable and subject to change.

*/
//! The Rust parser and macro expander.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.

#![crate_id = "syntax#0.11.0"] // NOTE: remove after stage0
#![crate_name = "syntax"]

@ -18,7 +18,7 @@ use parse::token::INTERPOLATED;

use std::gc::{Gc, GC};

// a parser that can parse attributes.
/// A parser that can parse attributes.
pub trait ParserAttr {
    fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute>;
    fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute;

@ -30,7 +30,7 @@ pub trait ParserAttr {
}

impl<'a> ParserAttr for Parser<'a> {
    // Parse attributes that appear before an item
    /// Parse attributes that appear before an item
    fn parse_outer_attributes(&mut self) -> Vec<ast::Attribute> {
        let mut attrs: Vec<ast::Attribute> = Vec::new();
        loop {

@ -59,10 +59,10 @@ impl<'a> ParserAttr for Parser<'a> {
        return attrs;
    }

    // matches attribute = # ! [ meta_item ]
    //
    // if permit_inner is true, then a leading `!` indicates an inner
    // attribute
    /// Matches `attribute = # ! [ meta_item ]`
    ///
    /// If permit_inner is true, then a leading `!` indicates an inner
    /// attribute
    fn parse_attribute(&mut self, permit_inner: bool) -> ast::Attribute {
        debug!("parse_attributes: permit_inner={:?} self.token={:?}",
               permit_inner, self.token);

@ -114,17 +114,17 @@ impl<'a> ParserAttr for Parser<'a> {
        };
    }

    // Parse attributes that appear after the opening of an item. These should
    // be preceded by an exclamation mark, but we accept and warn about one
    // terminated by a semicolon. In addition to a vector of inner attributes,
    // this function also returns a vector that may contain the first outer
    // attribute of the next item (since we can't know whether the attribute
    // is an inner attribute of the containing item or an outer attribute of
    // the first contained item until we see the semi).
    /// Parse attributes that appear after the opening of an item. These should
    /// be preceded by an exclamation mark, but we accept and warn about one
    /// terminated by a semicolon. In addition to a vector of inner attributes,
    /// this function also returns a vector that may contain the first outer
    /// attribute of the next item (since we can't know whether the attribute
    /// is an inner attribute of the containing item or an outer attribute of
    /// the first contained item until we see the semi).

    // matches inner_attrs* outer_attr?
    // you can make the 'next' field an Option, but the result is going to be
    // more useful as a vector.
    /// matches inner_attrs* outer_attr?
    /// you can make the 'next' field an Option, but the result is going to be
    /// more useful as a vector.
    fn parse_inner_attrs_and_next(&mut self)
                                  -> (Vec<ast::Attribute>, Vec<ast::Attribute>) {
        let mut inner_attrs: Vec<ast::Attribute> = Vec::new();

@ -157,9 +157,9 @@ impl<'a> ParserAttr for Parser<'a> {
        (inner_attrs, next_outer_attrs)
    }

    // matches meta_item = IDENT
    //                   | IDENT = lit
    //                   | IDENT meta_seq
    /// matches meta_item = IDENT
    ///                   | IDENT = lit
    ///                   | IDENT meta_seq
    fn parse_meta_item(&mut self) -> Gc<ast::MetaItem> {
        match self.token {
            token::INTERPOLATED(token::NtMeta(e)) => {

@ -201,7 +201,7 @@ impl<'a> ParserAttr for Parser<'a> {
        }
    }

    // matches meta_seq = ( COMMASEP(meta_item) )
    /// matches meta_seq = ( COMMASEP(meta_item) )
    fn parse_meta_seq(&mut self) -> Vec<Gc<ast::MetaItem>> {
        self.parse_seq(&token::LPAREN,
                       &token::RPAREN,
@ -15,13 +15,13 @@
use ast;
use std::gc::Gc;

// does this expression require a semicolon to be treated
// as a statement? The negation of this: 'can this expression
// be used as a statement without a semicolon' -- is used
// as an early-bail-out in the parser so that, for instance,
// 'if true {...} else {...}
// |x| 5 '
// isn't parsed as (if true {...} else {...} | x) | 5
/// Does this expression require a semicolon to be treated
/// as a statement? The negation of this: 'can this expression
/// be used as a statement without a semicolon' -- is used
/// as an early-bail-out in the parser so that, for instance,
///     if true {...} else {...}
///     |x| 5
/// isn't parsed as (if true {...} else {...} | x) | 5
pub fn expr_requires_semi_to_be_stmt(e: Gc<ast::Expr>) -> bool {
    match e.node {
        ast::ExprIf(..)

@ -41,9 +41,9 @@ pub fn expr_is_simple_block(e: Gc<ast::Expr>) -> bool {
    }
}

// this statement requires a semicolon after it.
// note that in one case (stmt_semi), we've already
// seen the semicolon, and thus don't need another.
/// This statement requires a semicolon after it.
/// Note that in one case (stmt_semi), we've already
/// seen the semicolon, and thus don't need another.
pub fn stmt_ends_with_semi(stmt: &ast::Stmt) -> bool {
    return match stmt.node {
        ast::StmtDecl(d, _) => {

@ -12,8 +12,8 @@

use parse::token;

// SeqSep : a sequence separator (token)
// and whether a trailing separator is allowed.
/// SeqSep : a sequence separator (token)
/// and whether a trailing separator is allowed.
pub struct SeqSep {
    pub sep: Option<token::Token>,
    pub trailing_sep_allowed: bool

@ -24,10 +24,14 @@ use std::uint;

#[deriving(Clone, PartialEq)]
pub enum CommentStyle {
    Isolated, // No code on either side of each line of the comment
    Trailing, // Code exists to the left of the comment
    Mixed, // Code before /* foo */ and after the comment
    BlankLine, // Just a manual blank line "\n\n", for layout
    /// No code on either side of each line of the comment
    Isolated,
    /// Code exists to the left of the comment
    Trailing,
    /// Code before /* foo */ and after the comment
    Mixed,
    /// Just a manual blank line "\n\n", for layout
    BlankLine,
}

#[deriving(Clone)]

@ -198,9 +202,9 @@ fn read_line_comments(rdr: &mut StringReader, code_to_the_left: bool,
    }
}

// Returns None if the first col chars of s contain a non-whitespace char.
// Otherwise returns Some(k) where k is first char offset after that leading
// whitespace. Note k may be outside bounds of s.
/// Returns None if the first col chars of s contain a non-whitespace char.
/// Otherwise returns Some(k) where k is the first char offset after that leading
/// whitespace. Note k may be outside bounds of s.
fn all_whitespace(s: &str, col: CharPos) -> Option<uint> {
    let len = s.len();
    let mut col = col.to_uint();

@ -44,13 +44,13 @@ pub struct TokenAndSpan {

pub struct StringReader<'a> {
    pub span_diagnostic: &'a SpanHandler,
    // The absolute offset within the codemap of the next character to read
    /// The absolute offset within the codemap of the next character to read
    pub pos: BytePos,
    // The absolute offset within the codemap of the last character read(curr)
    /// The absolute offset within the codemap of the last character read (curr)
    pub last_pos: BytePos,
    // The column of the next character to read
    /// The column of the next character to read
    pub col: CharPos,
    // The last character to be read
    /// The last character to be read
    pub curr: Option<char>,
    pub filemap: Rc<codemap::FileMap>,
    /* cached: */

@ -60,7 +60,7 @@ pub struct StringReader<'a> {

impl<'a> Reader for StringReader<'a> {
    fn is_eof(&self) -> bool { self.curr.is_none() }
    // return the next token. EFFECT: advances the string_reader.
    /// Return the next token. EFFECT: advances the string_reader.
    fn next_token(&mut self) -> TokenAndSpan {
        let ret_val = TokenAndSpan {
            tok: replace(&mut self.peek_tok, token::UNDERSCORE),

@ -417,7 +417,7 @@ impl<'a> StringReader<'a> {
        return self.consume_any_line_comment();
    }

    // might return a sugared-doc-attr
    /// Might return a sugared-doc-attr
    fn consume_block_comment(&mut self) -> Option<TokenAndSpan> {
        // block comments starting with "/**" or "/*!" are doc-comments
        let is_doc_comment = self.curr_is('*') || self.curr_is('!');
@ -10,7 +10,6 @@

//! The main parser interface

use ast;
use codemap::{Span, CodeMap, FileMap};
use diagnostic::{SpanHandler, mk_span_handler, default_handler, Auto};

@ -32,7 +31,7 @@ pub mod common;
pub mod classify;
pub mod obsolete;

// info about a parsing session.
/// Info about a parsing session.
pub struct ParseSess {
    pub span_diagnostic: SpanHandler, // better be the same as the one in the reader!
    /// Used to determine and report recursive mod inclusions

@ -241,14 +240,14 @@ pub fn file_to_filemap(sess: &ParseSess, path: &Path, spanopt: Option<Span>)
    unreachable!()
}

// given a session and a string, add the string to
// the session's codemap and return the new filemap
/// Given a session and a string, add the string to
/// the session's codemap and return the new filemap
pub fn string_to_filemap(sess: &ParseSess, source: String, path: String)
                         -> Rc<FileMap> {
    sess.span_diagnostic.cm.new_filemap(path, source)
}

// given a filemap, produce a sequence of token-trees
/// Given a filemap, produce a sequence of token-trees
pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>)
                      -> Vec<ast::TokenTree> {
    // it appears to me that the cfg doesn't matter here... indeed,

@ -259,7 +258,7 @@ pub fn filemap_to_tts(sess: &ParseSess, filemap: Rc<FileMap>)
    p1.parse_all_token_trees()
}

// given tts and cfg, produce a parser
/// Given tts and cfg, produce a parser
pub fn tts_to_parser<'a>(sess: &'a ParseSess,
                         tts: Vec<ast::TokenTree>,
                         cfg: ast::CrateConfig) -> Parser<'a> {

@ -267,7 +266,7 @@ pub fn tts_to_parser<'a>(sess: &'a ParseSess,
    Parser::new(sess, cfg, box trdr)
}

// abort if necessary
/// Abort if necessary
pub fn maybe_aborted<T>(result: T, mut p: Parser) -> T {
    p.abort_if_errors();
    result

@ -38,8 +38,8 @@ pub enum ObsoleteSyntax {
pub trait ParserObsoleteMethods {
    /// Reports an obsolete syntax non-fatal error.
    fn obsolete(&mut self, sp: Span, kind: ObsoleteSyntax);
    // Reports an obsolete syntax non-fatal error, and returns
    // a placeholder expression
    /// Reports an obsolete syntax non-fatal error, and returns
    /// a placeholder expression
    fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr>;
    fn report(&mut self,
              sp: Span,

@ -83,8 +83,8 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> {
        self.report(sp, kind, kind_str, desc);
    }

    // Reports an obsolete syntax non-fatal error, and returns
    // a placeholder expression
    /// Reports an obsolete syntax non-fatal error, and returns
    /// a placeholder expression
    fn obsolete_expr(&mut self, sp: Span, kind: ObsoleteSyntax) -> Gc<Expr> {
        self.obsolete(sp, kind);
        self.mk_expr(sp.lo, sp.hi, ExprLit(box(GC) respan(sp, LitNil)))
File diff suppressed because it is too large

@ -92,9 +92,9 @@ pub enum Token {
    LIT_BINARY_RAW(Rc<Vec<u8>>, uint), /* raw binary str delimited by n hash symbols */

    /* Name components */
    // an identifier contains an "is_mod_name" boolean,
    // indicating whether :: follows this token with no
    // whitespace in between.
    /// An identifier contains an "is_mod_name" boolean,
    /// indicating whether :: follows this token with no
    /// whitespace in between.
    IDENT(ast::Ident, bool),
    UNDERSCORE,
    LIFETIME(ast::Ident),

@ -8,58 +8,56 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*
 * This pretty-printer is a direct reimplementation of Philip Karlton's
 * Mesa pretty-printer, as described in appendix A of
 *
 * STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen.
 * Stanford Department of Computer Science, 1979.
 *
 * The algorithm's aim is to break a stream into as few lines as possible
 * while respecting the indentation-consistency requirements of the enclosing
 * block, and avoiding breaking at silly places on block boundaries, for
 * example, between "x" and ")" in "x)".
 *
 * I am implementing this algorithm because it comes with 20 pages of
 * documentation explaining its theory, and because it addresses the set of
 * concerns I've seen other pretty-printers fall down on. Weirdly. Even though
 * it's 32 years old. What can I say?
 *
 * Despite some redundancies and quirks in the way it's implemented in that
 * paper, I've opted to keep the implementation here as similar as I can,
 * changing only what was blatantly wrong, a typo, or sufficiently
 * non-idiomatic rust that it really stuck out.
 *
 * In particular you'll see a certain amount of churn related to INTEGER vs.
 * CARDINAL in the Mesa implementation. Mesa apparently interconverts the two
 * somewhat readily? In any case, I've used uint for indices-in-buffers and
 * ints for character-sizes-and-indentation-offsets. This respects the need
 * for ints to "go negative" while carrying a pending-calculation balance, and
 * helps differentiate all the numbers flying around internally (slightly).
 *
 * I also inverted the indentation arithmetic used in the print stack, since
 * the Mesa implementation (somewhat randomly) stores the offset on the print
 * stack in terms of margin-col rather than col itself. I store col.
 *
 * I also implemented a small change in the String token, in that I store an
 * explicit length for the string. For most tokens this is just the length of
 * the accompanying string. But it's necessary to permit it to differ, for
 * encoding things that are supposed to "go on their own line" -- certain
 * classes of comment and blank-line -- where relying on adjacent
 * hardbreak-like Break tokens with long blankness indication doesn't actually
 * work. To see why, consider when there is a "thing that should be on its own
 * line" between two long blocks, say functions. If you put a hardbreak after
 * each function (or before each) and the breaking algorithm decides to break
 * there anyways (because the functions themselves are long) you wind up with
 * extra blank lines. If you don't put hardbreaks you can wind up with the
 * "thing which should be on its own line" not getting its own line in the
 * rare case of "really small functions" or such. This re-occurs with comments
 * and explicit blank lines. So in those cases we use a string with a payload
 * we want isolated to a line and an explicit length that's huge, surrounded
 * by two zero-length breaks. The algorithm will try its best to fit it on a
 * line (which it can't) and so naturally place the content on its own line to
 * avoid combining it with other lines and making matters even worse.
 */
//! This pretty-printer is a direct reimplementation of Philip Karlton's
//! Mesa pretty-printer, as described in appendix A of
//!
//! STAN-CS-79-770: "Pretty Printing", by Derek C. Oppen.
//! Stanford Department of Computer Science, 1979.
//!
//! The algorithm's aim is to break a stream into as few lines as possible
//! while respecting the indentation-consistency requirements of the enclosing
//! block, and avoiding breaking at silly places on block boundaries, for
//! example, between "x" and ")" in "x)".
//!
//! I am implementing this algorithm because it comes with 20 pages of
//! documentation explaining its theory, and because it addresses the set of
//! concerns I've seen other pretty-printers fall down on. Weirdly. Even though
//! it's 32 years old. What can I say?
//!
//! Despite some redundancies and quirks in the way it's implemented in that
//! paper, I've opted to keep the implementation here as similar as I can,
//! changing only what was blatantly wrong, a typo, or sufficiently
//! non-idiomatic rust that it really stuck out.
//!
//! In particular you'll see a certain amount of churn related to INTEGER vs.
//! CARDINAL in the Mesa implementation. Mesa apparently interconverts the two
//! somewhat readily? In any case, I've used uint for indices-in-buffers and
//! ints for character-sizes-and-indentation-offsets. This respects the need
//! for ints to "go negative" while carrying a pending-calculation balance, and
//! helps differentiate all the numbers flying around internally (slightly).
//!
//! I also inverted the indentation arithmetic used in the print stack, since
//! the Mesa implementation (somewhat randomly) stores the offset on the print
//! stack in terms of margin-col rather than col itself. I store col.
//!
//! I also implemented a small change in the String token, in that I store an
//! explicit length for the string. For most tokens this is just the length of
//! the accompanying string. But it's necessary to permit it to differ, for
//! encoding things that are supposed to "go on their own line" -- certain
//! classes of comment and blank-line -- where relying on adjacent
//! hardbreak-like Break tokens with long blankness indication doesn't actually
//! work. To see why, consider when there is a "thing that should be on its own
//! line" between two long blocks, say functions. If you put a hardbreak after
//! each function (or before each) and the breaking algorithm decides to break
//! there anyways (because the functions themselves are long) you wind up with
//! extra blank lines. If you don't put hardbreaks you can wind up with the
//! "thing which should be on its own line" not getting its own line in the
//! rare case of "really small functions" or such. This re-occurs with comments
//! and explicit blank lines. So in those cases we use a string with a payload
//! we want isolated to a line and an explicit length that's huge, surrounded
//! by two zero-length breaks. The algorithm will try its best to fit it on a
//! line (which it can't) and so naturally place the content on its own line to
//! avoid combining it with other lines and making matters even worse.

use std::io;
use std::string::String;

@ -186,107 +184,116 @@ pub fn mk_printer(out: Box<io::Writer>, linewidth: uint) -> Printer {
}

/*
 * In case you do not have the paper, here is an explanation of what's going
 * on.
 *
 * There is a stream of input tokens flowing through this printer.
 *
 * The printer buffers up to 3N tokens inside itself, where N is linewidth.
 * Yes, linewidth is chars and tokens are multi-char, but in the worst
 * case every token worth buffering is 1 char long, so it's ok.
 *
 * Tokens are String, Break, and Begin/End to delimit blocks.
 *
 * Begin tokens can carry an offset, saying "how far to indent when you break
 * inside here", as well as a flag indicating "consistent" or "inconsistent"
 * breaking. Consistent breaking means that after the first break, no attempt
 * will be made to flow subsequent breaks together onto lines. Inconsistent
 * is the opposite. Inconsistent breaking example would be, say:
 *
 *     foo(hello, there, good, friends)
 *
 * breaking inconsistently to become
 *
 *     foo(hello, there
 *         good, friends);
 *
 * whereas a consistent breaking would yield:
 *
 *     foo(hello,
 *         there
 *         good,
 *         friends);
 *
 * That is, in the consistent-break blocks we value vertical alignment
 * more than the ability to cram stuff onto a line. But in all cases if it
 * can make a block a one-liner, it'll do so.
 *
 * Carrying on with high-level logic:
 *
 * The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
 * 'right' indices denote the active portion of the ring buffer as well as
 * describing hypothetical points-in-the-infinite-stream at most 3N tokens
 * apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
 * between using 'left' and 'right' terms to denote the wrapepd-to-ring-buffer
 * and point-in-infinite-stream senses freely.
 *
 * There is a parallel ring buffer, 'size', that holds the calculated size of
 * each token. Why calculated? Because for Begin/End pairs, the "size"
 * includes everything between the pair. That is, the "size" of Begin is
 * actually the sum of the sizes of everything between Begin and the paired
 * End that follows. Since that is arbitrarily far in the future, 'size' is
 * being rewritten regularly while the printer runs; in fact most of the
 * machinery is here to work out 'size' entries on the fly (and give up when
 * they're so obviously over-long that "infinity" is a good enough
 * approximation for purposes of line breaking).
 *
 * The "input side" of the printer is managed as an abstract process called
 * SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
 * manage calculating 'size'. SCAN is, in other words, the process of
 * calculating 'size' entries.
 *
 * The "output side" of the printer is managed by an abstract process called
 * PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
 * do with each token/size pair it consumes as it goes. It's trying to consume
 * the entire buffered window, but can't output anything until the size is >=
 * 0 (sizes are set to negative while they're pending calculation).
 *
 * So SCAN takes input and buffers tokens and pending calculations, while
 * PRINT gobbles up completed calculations and tokens from the buffer. The
 * theory is that the two can never get more than 3N tokens apart, because
 * once there's "obviously" too much data to fit on a line, in a size
 * calculation, SCAN will write "infinity" to the size and let PRINT consume
 * it.
 *
 * In this implementation (following the paper, again) the SCAN process is
 * the method called 'pretty_print', and the 'PRINT' process is the method
 * called 'print'.
 */
/// In case you do not have the paper, here is an explanation of what's going
/// on.
///
/// There is a stream of input tokens flowing through this printer.
///
/// The printer buffers up to 3N tokens inside itself, where N is linewidth.
/// Yes, linewidth is chars and tokens are multi-char, but in the worst
/// case every token worth buffering is 1 char long, so it's ok.
///
/// Tokens are String, Break, and Begin/End to delimit blocks.
///
/// Begin tokens can carry an offset, saying "how far to indent when you break
/// inside here", as well as a flag indicating "consistent" or "inconsistent"
/// breaking. Consistent breaking means that after the first break, no attempt
/// will be made to flow subsequent breaks together onto lines. Inconsistent
/// is the opposite. Inconsistent breaking example would be, say:
///
///     foo(hello, there, good, friends)
///
/// breaking inconsistently to become
///
///     foo(hello, there
///         good, friends);
///
/// whereas a consistent breaking would yield:
///
///     foo(hello,
///         there
///         good,
///         friends);
///
/// That is, in the consistent-break blocks we value vertical alignment
/// more than the ability to cram stuff onto a line. But in all cases if it
/// can make a block a one-liner, it'll do so.
///
/// Carrying on with high-level logic:
///
/// The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
/// 'right' indices denote the active portion of the ring buffer as well as
/// describing hypothetical points-in-the-infinite-stream at most 3N tokens
/// apart (i.e. "not wrapped to ring-buffer boundaries"). The paper will switch
/// between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
/// and point-in-infinite-stream senses freely.
///
/// There is a parallel ring buffer, 'size', that holds the calculated size of
/// each token. Why calculated? Because for Begin/End pairs, the "size"
/// includes everything between the pair. That is, the "size" of Begin is
/// actually the sum of the sizes of everything between Begin and the paired
/// End that follows. Since that is arbitrarily far in the future, 'size' is
/// being rewritten regularly while the printer runs; in fact most of the
/// machinery is here to work out 'size' entries on the fly (and give up when
/// they're so obviously over-long that "infinity" is a good enough
/// approximation for purposes of line breaking).
///
/// The "input side" of the printer is managed as an abstract process called
/// SCAN, which uses 'scan_stack', 'scan_stack_empty', 'top' and 'bottom', to
/// manage calculating 'size'. SCAN is, in other words, the process of
/// calculating 'size' entries.
///
/// The "output side" of the printer is managed by an abstract process called
/// PRINT, which uses 'print_stack', 'margin' and 'space' to figure out what to
/// do with each token/size pair it consumes as it goes. It's trying to consume
/// the entire buffered window, but can't output anything until the size is >=
/// 0 (sizes are set to negative while they're pending calculation).
///
/// So SCAN takes input and buffers tokens and pending calculations, while
/// PRINT gobbles up completed calculations and tokens from the buffer. The
/// theory is that the two can never get more than 3N tokens apart, because
/// once there's "obviously" too much data to fit on a line, in a size
/// calculation, SCAN will write "infinity" to the size and let PRINT consume
/// it.
///
/// In this implementation (following the paper, again) the SCAN process is
/// the method called 'pretty_print', and the 'PRINT' process is the method
/// called 'print'.
pub struct Printer {
    pub out: Box<io::Writer>,
    buf_len: uint,
    margin: int, // width of lines we're constrained to
    space: int, // number of spaces left on line
    left: uint, // index of left side of input stream
    right: uint, // index of right side of input stream
    token: Vec<Token>, // ring-buffr stream goes through
    size: Vec<int>, // ring-buffer of calculated sizes
    left_total: int, // running size of stream "...left"
    right_total: int, // running size of stream "...right"
    // pseudo-stack, really a ring too. Holds the
    // primary-ring-buffers index of the Begin that started the
    // current block, possibly with the most recent Break after that
    // Begin (if there is any) on top of it. Stuff is flushed off the
    // bottom as it becomes irrelevant due to the primary ring-buffer
    // advancing.
    /// Width of lines we're constrained to
    margin: int,
    /// Number of spaces left on line
    space: int,
    /// Index of left side of input stream
    left: uint,
    /// Index of right side of input stream
    right: uint,
    /// Ring-buffer the stream goes through
    token: Vec<Token>,
    /// Ring-buffer of calculated sizes
    size: Vec<int>,
    /// Running size of stream "...left"
    left_total: int,
    /// Running size of stream "...right"
    right_total: int,
    /// Pseudo-stack, really a ring too. Holds the
    /// primary-ring-buffers index of the Begin that started the
    /// current block, possibly with the most recent Break after that
    /// Begin (if there is any) on top of it. Stuff is flushed off the
    /// bottom as it becomes irrelevant due to the primary ring-buffer
    /// advancing.
    scan_stack: Vec<uint>,
    scan_stack_empty: bool, // top==bottom disambiguator
    top: uint, // index of top of scan_stack
    bottom: uint, // index of bottom of scan_stack
    // stack of blocks-in-progress being flushed by print
    /// Top==bottom disambiguator
    scan_stack_empty: bool,
    /// Index of top of scan_stack
    top: uint,
    /// Index of bottom of scan_stack
    bottom: uint,
    /// Stack of blocks-in-progress being flushed by print
    print_stack: Vec<PrintStackElem>,
    // buffered indentation to avoid writing trailing whitespace
    /// Buffered indentation to avoid writing trailing whitespace
    pending_indentation: int,
}
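
The dual use of `left` and `right` described above (positions in the infinite token stream versus slots in the ring buffer) can be illustrated with a toy, assumed-simplified ring in present-day Rust; this is not the real token/size machinery:

```rust
// A stream index counts positions in the (conceptually infinite) token
// stream; it is wrapped to a buffer slot only on access.
struct Ring {
    buf: Vec<i32>, // stands in for the token/size buffers
}

impl Ring {
    fn slot(&self, stream_index: usize) -> usize {
        stream_index % self.buf.len() // wrap infinite-stream index to a slot
    }
}

fn main() {
    let ring = Ring { buf: vec![0; 6] }; // 3N slots for linewidth N = 2
    let (left, right) = (5, 9);          // points in the infinite stream
    assert_eq!(ring.slot(left), 5);
    assert_eq!(ring.slot(right), 3);     // wrapped around the buffer
}
```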
|
||||
|
||||
|
@ -88,9 +88,9 @@ pub static indent_unit: uint = 4u;
|
||||
|
||||
pub static default_columns: uint = 78u;
|
||||
|
||||
// Requires you to pass an input filename and reader so that
|
||||
// it can scan the input text for comments and literals to
|
||||
// copy forward.
|
||||
/// Requires you to pass an input filename and reader so that
|
||||
/// it can scan the input text for comments and literals to
|
||||
/// copy forward.
|
||||
pub fn print_crate<'a>(cm: &'a CodeMap,
|
||||
span_diagnostic: &diagnostic::SpanHandler,
|
||||
krate: &ast::Crate,
|
||||
|
@ -8,9 +8,9 @@

// option. This file may not be copied, modified, or distributed
// except according to those terms.

// An "interner" is a data structure that associates values with uint tags and
// allows bidirectional lookup; i.e. given a value, one can easily find the
// tag, and vice versa.
//! An "interner" is a data structure that associates values with uint tags and
//! allows bidirectional lookup; i.e. given a value, one can easily find the
//! tag, and vice versa.
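As a rough sketch of that idea in modern Rust (hypothetical names, not this module's actual types): a vector provides tag-to-value lookup and a hash map provides value-to-tag lookup.

use std::collections::HashMap;

// Sketch of a bidirectional interner: intern() returns a stable uint-like
// tag for a value; get() maps a tag back to the value.
#[derive(Default)]
struct SketchInterner {
    map: HashMap<String, usize>, // value -> tag
    vect: Vec<String>,           // tag -> value
}

impl SketchInterner {
    fn intern(&mut self, val: &str) -> usize {
        if let Some(&idx) = self.map.get(val) {
            return idx; // already interned: same value, same tag
        }
        let idx = self.vect.len();
        self.map.insert(val.to_string(), idx);
        self.vect.push(val.to_string());
        idx
    }

    fn get(&self, idx: usize) -> &str {
        &self.vect[idx]
    }
}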

use ast::Name;

@ -17,14 +17,14 @@ use parse::token;

use std::gc::Gc;

// map a string to tts, using a made-up filename:
/// Map a string to tts, using a made-up filename:
pub fn string_to_tts(source_str: String) -> Vec<ast::TokenTree> {
    let ps = new_parse_sess();
    filemap_to_tts(&ps,
                   string_to_filemap(&ps, source_str, "bogofile".to_string()))
}

// map string to parser (via tts)
/// Map string to parser (via tts)
pub fn string_to_parser<'a>(ps: &'a ParseSess, source_str: String) -> Parser<'a> {
    new_parser_from_source_str(ps,
                               Vec::new(),

@ -40,51 +40,51 @@ fn with_error_checking_parse<T>(s: String, f: |&mut Parser| -> T) -> T {
    x
}

// parse a string, return a crate.
/// Parse a string, return a crate.
pub fn string_to_crate(source_str: String) -> ast::Crate {
    with_error_checking_parse(source_str, |p| {
        p.parse_crate_mod()
    })
}

// parse a string, return an expr
/// Parse a string, return an expr
pub fn string_to_expr(source_str: String) -> Gc<ast::Expr> {
    with_error_checking_parse(source_str, |p| {
        p.parse_expr()
    })
}

// parse a string, return an item
/// Parse a string, return an item
pub fn string_to_item(source_str: String) -> Option<Gc<ast::Item>> {
    with_error_checking_parse(source_str, |p| {
        p.parse_item(Vec::new())
    })
}

// parse a string, return a stmt
/// Parse a string, return a stmt
pub fn string_to_stmt(source_str: String) -> Gc<ast::Stmt> {
    with_error_checking_parse(source_str, |p| {
        p.parse_stmt(Vec::new())
    })
}

// parse a string, return a pat. Uses "irrefutable"... which doesn't
// (currently) affect parsing.
/// Parse a string, return a pat. Uses "irrefutable"... which doesn't
/// (currently) affect parsing.
pub fn string_to_pat(source_str: String) -> Gc<ast::Pat> {
    string_to_parser(&new_parse_sess(), source_str).parse_pat()
}
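A usage sketch of these helpers (written against the 2014-era API shown above, so illustrative rather than compilable on a modern toolchain):

fn demo_parse_helpers() {
    // Parse an expression; the result is a Gc<ast::Expr>.
    let _expr = string_to_expr("1 + 2".to_string());
    // Parse an item; None would mean the string did not begin an item.
    let item = string_to_item("fn a() {}".to_string());
    assert!(item.is_some());
}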

// convert a vector of strings to a vector of ast::Ident's
/// Convert a vector of strings to a vector of ast::Ident's
pub fn strs_to_idents(ids: Vec<&str>) -> Vec<ast::Ident> {
    ids.iter().map(|u| token::str_to_ident(*u)).collect()
}

// does the given string match the pattern? whitespace in the first string
// may be deleted or replaced with other whitespace to match the pattern.
// this function is unicode-ignorant; fortunately, the careful design of
// UTF-8 mitigates this ignorance. In particular, this function only collapses
// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode
// chars. Unsurprisingly, it doesn't do NKF-normalization(?).
/// Does the given string match the pattern? Whitespace in the first string
/// may be deleted or replaced with other whitespace to match the pattern.
/// This function is unicode-ignorant; fortunately, the careful design of
/// UTF-8 mitigates this ignorance. In particular, this function only collapses
/// sequences of \n, \r, ' ', and \t, but it should otherwise tolerate unicode
/// chars. Unsurprisingly, it doesn't do NFK-normalization(?).
pub fn matches_codepattern(a: &str, b: &str) -> bool {
    let mut idx_a = 0;
    let mut idx_b = 0;

@ -122,9 +122,9 @@ pub fn matches_codepattern(a : &str, b : &str) -> bool {
    }
}
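Illustrative calls (assuming the behavior the doc comment describes, since the middle of the body is elided in this hunk): whitespace in the first argument is flexible, while non-whitespace characters must match the pattern exactly.

// Extra or rearranged whitespace in the first string still matches...
assert!(matches_codepattern("fn  main ( )", "fn main()"));
// ...but differing non-whitespace characters do not.
assert!(!matches_codepattern("fn main(x)", "fn main()"));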

// given a string and an index, return the first uint >= idx
// that is a non-ws-char or is outside of the legal range of
// the string.
/// Given a string and an index, return the first uint >= idx
/// that is a non-ws char or is outside of the legal range of
/// the string.
fn scan_for_non_ws_or_end(a: &str, idx: uint) -> uint {
    let mut i = idx;
    let len = a.len();

@ -134,7 +134,7 @@ fn scan_for_non_ws_or_end(a : &str, idx: uint) -> uint {
    i
}
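The loop elided by the hunk above presumably just advances past whitespace; a plausible reconstruction in modern Rust (a sketch under that assumption, not the original body):

fn scan_for_non_ws_or_end(a: &str, idx: usize) -> usize {
    let bytes = a.as_bytes();
    let mut i = idx;
    // The characters treated as whitespace here (see is_whitespace below)
    // are all single-byte in UTF-8, so byte-wise scanning is safe.
    while i < bytes.len() && matches!(bytes[i], b' ' | b'\t' | b'\r' | b'\n') {
        i += 1;
    }
    i
}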

// copied from lexer.
/// Copied from lexer.
pub fn is_whitespace(c: char) -> bool {
    return c == ' ' || c == '\t' || c == '\r' || c == '\n';
}
@ -8,6 +8,18 @@

// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Context-passing AST walker. Each overridden visit method has full control
//! over what happens with its node: it can do its own traversal of the node's
//! children (potentially passing in different contexts to each), call
//! `visit::visit_*` to apply the default traversal algorithm (again, it can
//! override the context), or prevent deeper traversal by doing nothing.
//!
//! Note: it is an important invariant that the default visitor walks the body
//! of a function in "execution order" (more concretely, reverse post-order
//! with respect to the CFG implied by the AST), meaning that if AST node A may
//! execute before AST node B, then A is visited first. The borrow checker in
//! particular relies on this property.
//!

use abi::Abi;
use ast::*;
use ast;

@ -17,27 +29,15 @@ use owned_slice::OwnedSlice;

use std::gc::Gc;

// Context-passing AST walker. Each overridden visit method has full control
// over what happens with its node, it can do its own traversal of the node's
// children (potentially passing in different contexts to each), call
// visit::visit_* to apply the default traversal algorithm (again, it can
// override the context), or prevent deeper traversal by doing nothing.
//
// Note: it is an important invariant that the default visitor walks the body
// of a function in "execution order" (more concretely, reverse post-order
// with respect to the CFG implied by the AST), meaning that if AST node A may
// execute before AST node B, then A is visited first. The borrow checker in
// particular relies on this property.

pub enum FnKind<'a> {
    // fn foo() or extern "Abi" fn foo()
    /// fn foo() or extern "Abi" fn foo()
    FkItemFn(Ident, &'a Generics, FnStyle, Abi),

    // fn foo(&self)
    /// fn foo(&self)
    FkMethod(Ident, &'a Generics, &'a Method),

    // |x, y| ...
    // proc(x, y) ...
    /// |x, y| ...
    /// proc(x, y) ...
    FkFnBlock,
}
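To illustrate the walker pattern the module docs describe, here is a self-contained sketch in modern Rust over a toy expression type. The names Expr, Visitor, walk_expr, and LitCounter are hypothetical; walk_expr plays the role of this module's `visit::visit_*` default-traversal functions.

// Toy AST: enough to show "execution order" traversal of children.
enum Expr {
    Lit(i64),
    Add(Box<Expr>, Box<Expr>),
}

trait Visitor {
    fn visit_expr(&mut self, e: &Expr) {
        // Default behavior: apply the standard traversal.
        walk_expr(self, e);
    }
}

fn walk_expr<V: Visitor + ?Sized>(v: &mut V, e: &Expr) {
    // In `a + b`, `a` may execute before `b`, so `a` is visited first.
    if let Expr::Add(a, b) = e {
        v.visit_expr(a);
        v.visit_expr(b);
    }
}

// A visitor that overrides visit_expr but still recurses by calling the
// default walk; returning without calling it would cut traversal short.
struct LitCounter {
    count: usize,
}

impl Visitor for LitCounter {
    fn visit_expr(&mut self, e: &Expr) {
        if let Expr::Lit(_) = e {
            self.count += 1;
        }
        walk_expr(self, e);
    }
}

Running LitCounter over the tree for `1 + (2 + 3)` visits the literals in execution order and ends with count == 3.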